diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
deleted file mode 100644
index e7db263..0000000
--- a/Godeps/Godeps.json
+++ /dev/null
@@ -1,163 +0,0 @@
-{
- "ImportPath": "github.com/Luzifer/vault-openvpn",
- "GoVersion": "go1.8",
- "GodepVersion": "v79",
- "Deps": [
- {
- "ImportPath": "github.com/Luzifer/rconfig",
- "Comment": "v1.1.0",
- "Rev": "c27bd3a64b5b19556914d9fec69922cf3852d585"
- },
- {
- "ImportPath": "github.com/Sirupsen/logrus",
- "Comment": "v0.10.0-38-g3ec0642",
- "Rev": "3ec0642a7fb6488f65b06f9040adc67e3990296a"
- },
- {
- "ImportPath": "github.com/fatih/structs",
- "Rev": "3fe2facc32a7fbde4b29c0f85604dc1dd22836d2"
- },
- {
- "ImportPath": "github.com/hashicorp/errwrap",
- "Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
- },
- {
- "ImportPath": "github.com/hashicorp/go-cleanhttp",
- "Rev": "ad28ea4487f05916463e2423a55166280e8254b5"
- },
- {
- "ImportPath": "github.com/hashicorp/go-multierror",
- "Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
- },
- {
- "ImportPath": "github.com/hashicorp/go-rootcerts",
- "Rev": "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
- },
- {
- "ImportPath": "github.com/hashicorp/hcl",
- "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
- },
- {
- "ImportPath": "github.com/hashicorp/hcl/hcl/ast",
- "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
- },
- {
- "ImportPath": "github.com/hashicorp/hcl/hcl/parser",
- "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
- },
- {
- "ImportPath": "github.com/hashicorp/hcl/hcl/scanner",
- "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
- },
- {
- "ImportPath": "github.com/hashicorp/hcl/hcl/strconv",
- "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
- },
- {
- "ImportPath": "github.com/hashicorp/hcl/hcl/token",
- "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
- },
- {
- "ImportPath": "github.com/hashicorp/hcl/json/parser",
- "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
- },
- {
- "ImportPath": "github.com/hashicorp/hcl/json/scanner",
- "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
- },
- {
- "ImportPath": "github.com/hashicorp/hcl/json/token",
- "Rev": "ef8133da8cda503718a74741312bf50821e6de79"
- },
- {
- "ImportPath": "github.com/hashicorp/vault/api",
- "Comment": "v0.7.0-190-g4490e93",
- "Rev": "4490e93395fb70c3a25ade1fe88f363561a7d584"
- },
- {
- "ImportPath": "github.com/hashicorp/vault/helper/certutil",
- "Comment": "v0.7.0-190-g4490e93",
- "Rev": "4490e93395fb70c3a25ade1fe88f363561a7d584"
- },
- {
- "ImportPath": "github.com/hashicorp/vault/helper/compressutil",
- "Comment": "v0.7.0-190-g4490e93",
- "Rev": "4490e93395fb70c3a25ade1fe88f363561a7d584"
- },
- {
- "ImportPath": "github.com/hashicorp/vault/helper/errutil",
- "Comment": "v0.7.0-190-g4490e93",
- "Rev": "4490e93395fb70c3a25ade1fe88f363561a7d584"
- },
- {
- "ImportPath": "github.com/hashicorp/vault/helper/jsonutil",
- "Comment": "v0.7.0-190-g4490e93",
- "Rev": "4490e93395fb70c3a25ade1fe88f363561a7d584"
- },
- {
- "ImportPath": "github.com/mattn/go-runewidth",
- "Comment": "v0.0.2-1-g14207d2",
- "Rev": "14207d285c6c197daabb5c9793d63e7af9ab2d50"
- },
- {
- "ImportPath": "github.com/mitchellh/go-homedir",
- "Rev": "981ab348d865cf048eb7d17e78ac7192632d8415"
- },
- {
- "ImportPath": "github.com/mitchellh/mapstructure",
- "Rev": "ca63d7c062ee3c9f34db231e352b60012b4fd0c1"
- },
- {
- "ImportPath": "github.com/olekukonko/tablewriter",
- "Rev": "febf2d34b54a69ce7530036c7503b1c9fbfdf0bb"
- },
- {
- "ImportPath": "github.com/sethgrid/pester",
- "Rev": "4f4c0a67b6496764028e1ab9fd8dfb630282ed2f"
- },
- {
- "ImportPath": "github.com/spf13/pflag",
- "Rev": "c7e63cf4530bcd3ba943729cee0efeff2ebea63f"
- },
- {
- "ImportPath": "golang.org/x/net/http2",
- "Rev": "f09c4662a0bd6bd8943ac7b4931e185df9471da4"
- },
- {
- "ImportPath": "golang.org/x/net/http2/hpack",
- "Rev": "f09c4662a0bd6bd8943ac7b4931e185df9471da4"
- },
- {
- "ImportPath": "golang.org/x/net/idna",
- "Rev": "f09c4662a0bd6bd8943ac7b4931e185df9471da4"
- },
- {
- "ImportPath": "golang.org/x/net/lex/httplex",
- "Rev": "f09c4662a0bd6bd8943ac7b4931e185df9471da4"
- },
- {
- "ImportPath": "golang.org/x/sys/unix",
- "Rev": "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9"
- },
- {
- "ImportPath": "golang.org/x/text/secure/bidirule",
- "Rev": "2df9074612f50810d82416d2229398a1e7188c5c"
- },
- {
- "ImportPath": "golang.org/x/text/transform",
- "Rev": "2df9074612f50810d82416d2229398a1e7188c5c"
- },
- {
- "ImportPath": "golang.org/x/text/unicode/bidi",
- "Rev": "2df9074612f50810d82416d2229398a1e7188c5c"
- },
- {
- "ImportPath": "golang.org/x/text/unicode/norm",
- "Rev": "2df9074612f50810d82416d2229398a1e7188c5c"
- },
- {
- "ImportPath": "gopkg.in/yaml.v2",
- "Rev": "31c299268d302dd0aa9a0dcf765a3d58971ac83f"
- }
- ]
-}
diff --git a/Godeps/Readme b/Godeps/Readme
deleted file mode 100644
index 4cdaa53..0000000
--- a/Godeps/Readme
+++ /dev/null
@@ -1,5 +0,0 @@
-This directory tree is generated automatically by godep.
-
-Please do not edit.
-
-See https://github.com/tools/godep for more information.
diff --git a/Gopkg.lock b/Gopkg.lock
new file mode 100644
index 0000000..fd3ab1d
--- /dev/null
+++ b/Gopkg.lock
@@ -0,0 +1,102 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "github.com/Luzifer/rconfig"
+ packages = ["."]
+ revision = "c27bd3a64b5b19556914d9fec69922cf3852d585"
+ version = "v1.1.0"
+
+[[projects]]
+ name = "github.com/Sirupsen/logrus"
+ packages = ["."]
+ revision = "3ec0642a7fb6488f65b06f9040adc67e3990296a"
+
+[[projects]]
+ name = "github.com/fatih/structs"
+ packages = ["."]
+ revision = "3fe2facc32a7fbde4b29c0f85604dc1dd22836d2"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/hashicorp/errwrap"
+ packages = ["."]
+ revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55"
+
+[[projects]]
+ name = "github.com/hashicorp/go-cleanhttp"
+ packages = ["."]
+ revision = "ad28ea4487f05916463e2423a55166280e8254b5"
+
+[[projects]]
+ name = "github.com/hashicorp/go-multierror"
+ packages = ["."]
+ revision = "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/hashicorp/go-rootcerts"
+ packages = ["."]
+ revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
+
+[[projects]]
+ name = "github.com/hashicorp/hcl"
+ packages = [".","hcl/ast","hcl/parser","hcl/scanner","hcl/strconv","hcl/token","json/parser","json/scanner","json/token"]
+ revision = "ef8133da8cda503718a74741312bf50821e6de79"
+
+[[projects]]
+ name = "github.com/hashicorp/vault"
+ packages = ["api","helper/certutil","helper/compressutil","helper/errutil","helper/jsonutil"]
+ revision = "4490e93395fb70c3a25ade1fe88f363561a7d584"
+
+[[projects]]
+ name = "github.com/mattn/go-runewidth"
+ packages = ["."]
+ revision = "14207d285c6c197daabb5c9793d63e7af9ab2d50"
+
+[[projects]]
+ name = "github.com/mitchellh/go-homedir"
+ packages = ["."]
+ revision = "981ab348d865cf048eb7d17e78ac7192632d8415"
+
+[[projects]]
+ name = "github.com/mitchellh/mapstructure"
+ packages = ["."]
+ revision = "ca63d7c062ee3c9f34db231e352b60012b4fd0c1"
+
+[[projects]]
+ name = "github.com/olekukonko/tablewriter"
+ packages = ["."]
+ revision = "febf2d34b54a69ce7530036c7503b1c9fbfdf0bb"
+
+[[projects]]
+ name = "github.com/sethgrid/pester"
+ packages = ["."]
+ revision = "4f4c0a67b6496764028e1ab9fd8dfb630282ed2f"
+
+[[projects]]
+ name = "github.com/spf13/pflag"
+ packages = ["."]
+ revision = "c7e63cf4530bcd3ba943729cee0efeff2ebea63f"
+
+[[projects]]
+ name = "golang.org/x/net"
+ packages = ["http2","http2/hpack","idna","lex/httplex"]
+ revision = "f09c4662a0bd6bd8943ac7b4931e185df9471da4"
+
+[[projects]]
+ name = "golang.org/x/sys"
+ packages = ["unix"]
+ revision = "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9"
+
+[[projects]]
+ name = "gopkg.in/yaml.v2"
+ packages = ["."]
+ revision = "31c299268d302dd0aa9a0dcf765a3d58971ac83f"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "338132dcce86eed0dc26d5c2a448092d59a299761c47da97aed0c6e98e8c355d"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
new file mode 100644
index 0000000..7cb6546
--- /dev/null
+++ b/Gopkg.toml
@@ -0,0 +1,41 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+
+[[constraint]]
+ name = "github.com/Luzifer/rconfig"
+ version = "1.1.0"
+
+[[constraint]]
+ name = "github.com/Sirupsen/logrus"
+
+[[constraint]]
+ name = "github.com/hashicorp/vault"
+
+[[constraint]]
+ name = "github.com/mitchellh/go-homedir"
+
+[[constraint]]
+ name = "github.com/olekukonko/tablewriter"
+
+[[constraint]]
+ name = "gopkg.in/yaml.v2"
diff --git a/vendor/github.com/Luzifer/rconfig/bool_test.go b/vendor/github.com/Luzifer/rconfig/bool_test.go
new file mode 100644
index 0000000..11a6f4b
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/bool_test.go
@@ -0,0 +1,70 @@
+package rconfig
+
+import (
+ "os"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Testing bool parsing", func() {
+ type t struct {
+ Test1 bool `default:"true"`
+ Test2 bool `default:"false" flag:"test2"`
+ Test3 bool `default:"true" flag:"test3,t"`
+ Test4 bool `flag:"test4"`
+ }
+
+ var (
+ err error
+ args []string
+ cfg t
+ )
+
+ BeforeEach(func() {
+ cfg = t{}
+ args = []string{
+ "--test2",
+ "-t",
+ }
+ })
+
+ JustBeforeEach(func() {
+ err = parse(&cfg, args)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.Test1).To(Equal(true))
+ Expect(cfg.Test2).To(Equal(true))
+ Expect(cfg.Test3).To(Equal(true))
+ Expect(cfg.Test4).To(Equal(false))
+ })
+})
+
+var _ = Describe("Testing to set bool from ENV with default", func() {
+ type t struct {
+ Test1 bool `default:"true" env:"TEST1"`
+ }
+
+ var (
+ err error
+ args []string
+ cfg t
+ )
+
+ BeforeEach(func() {
+ cfg = t{}
+ args = []string{}
+ })
+
+ JustBeforeEach(func() {
+ os.Unsetenv("TEST1")
+ err = parse(&cfg, args)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.Test1).To(Equal(true))
+ })
+})
diff --git a/vendor/github.com/Luzifer/rconfig/duration_test.go b/vendor/github.com/Luzifer/rconfig/duration_test.go
new file mode 100644
index 0000000..1ca95dc
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/duration_test.go
@@ -0,0 +1,41 @@
+package rconfig
+
+import (
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Duration", func() {
+ type t struct {
+ Test time.Duration `flag:"duration"`
+ TestS time.Duration `flag:"other-duration,o"`
+ TestDef time.Duration `default:"30h"`
+ }
+
+ var (
+ err error
+ args []string
+ cfg t
+ )
+
+ BeforeEach(func() {
+ cfg = t{}
+ args = []string{
+ "--duration=23s", "-o", "45m",
+ }
+ })
+
+ JustBeforeEach(func() {
+ err = parse(&cfg, args)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.Test).To(Equal(23 * time.Second))
+ Expect(cfg.TestS).To(Equal(45 * time.Minute))
+
+ Expect(cfg.TestDef).To(Equal(30 * time.Hour))
+ })
+})
diff --git a/vendor/github.com/Luzifer/rconfig/errors_test.go b/vendor/github.com/Luzifer/rconfig/errors_test.go
new file mode 100644
index 0000000..46db039
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/errors_test.go
@@ -0,0 +1,56 @@
+package rconfig
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Testing errors", func() {
+
+ It("should not accept string as int", func() {
+ Expect(parse(&struct {
+ A int `default:"a"`
+ }{}, []string{})).To(HaveOccurred())
+ })
+
+ It("should not accept string as float", func() {
+ Expect(parse(&struct {
+ A float32 `default:"a"`
+ }{}, []string{})).To(HaveOccurred())
+ })
+
+ It("should not accept string as uint", func() {
+ Expect(parse(&struct {
+ A uint `default:"a"`
+ }{}, []string{})).To(HaveOccurred())
+ })
+
+ It("should not accept string as uint in sub-struct", func() {
+ Expect(parse(&struct {
+ B struct {
+ A uint `default:"a"`
+ }
+ }{}, []string{})).To(HaveOccurred())
+ })
+
+ It("should not accept string slice as int slice", func() {
+ Expect(parse(&struct {
+ A []int `default:"a,bn"`
+ }{}, []string{})).To(HaveOccurred())
+ })
+
+ It("should not accept variables not being pointers", func() {
+ cfg := struct {
+ A string `default:"a"`
+ }{}
+
+ Expect(parse(cfg, []string{})).To(HaveOccurred())
+ })
+
+ It("should not accept variables not being pointers to structs", func() {
+ cfg := "test"
+
+ Expect(parse(cfg, []string{})).To(HaveOccurred())
+ })
+
+})
diff --git a/vendor/github.com/Luzifer/rconfig/example_test.go b/vendor/github.com/Luzifer/rconfig/example_test.go
new file mode 100644
index 0000000..0a65b2f
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/example_test.go
@@ -0,0 +1,37 @@
+package rconfig
+
+import (
+ "fmt"
+ "os"
+)
+
+func ExampleParse() {
+ // We're building an example configuration with a sub-struct to be filled
+ // by the Parse command.
+ config := struct {
+ Username string `default:"unknown" flag:"user,u" description:"Your name"`
+ Details struct {
+ Age int `default:"25" flag:"age" description:"Your age"`
+ }
+ }{}
+
+ // To get more reliable results we're setting os.Args to a known value.
+ // In real-life use cases you wouldn't do this but would parse the
+ // original command-line arguments.
+ os.Args = []string{
+ "example",
+ "--user=Luzifer",
+ }
+
+ Parse(&config)
+
+ fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
+ config.Username,
+ config.Details.Age)
+
+ // You can also show a usage message to your user
+ Usage()
+
+ // Output:
+ // Hello Luzifer, happy 25th birthday.
+}
diff --git a/vendor/github.com/Luzifer/rconfig/float_test.go b/vendor/github.com/Luzifer/rconfig/float_test.go
new file mode 100644
index 0000000..4ec8a1e
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/float_test.go
@@ -0,0 +1,44 @@
+package rconfig
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Testing float parsing", func() {
+ type t struct {
+ Test32 float32 `flag:"float32"`
+ Test32P float32 `flag:"float32p,3"`
+ Test64 float64 `flag:"float64"`
+ Test64P float64 `flag:"float64p,6"`
+ TestDef float32 `default:"66.256"`
+ }
+
+ var (
+ err error
+ args []string
+ cfg t
+ )
+
+ BeforeEach(func() {
+ cfg = t{}
+ args = []string{
+ "--float32=5.5", "-3", "6.6",
+ "--float64=7.7", "-6", "8.8",
+ }
+ })
+
+ JustBeforeEach(func() {
+ err = parse(&cfg, args)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.Test32).To(Equal(float32(5.5)))
+ Expect(cfg.Test32P).To(Equal(float32(6.6)))
+ Expect(cfg.Test64).To(Equal(float64(7.7)))
+ Expect(cfg.Test64P).To(Equal(float64(8.8)))
+
+ Expect(cfg.TestDef).To(Equal(float32(66.256)))
+ })
+})
diff --git a/vendor/github.com/Luzifer/rconfig/general_test.go b/vendor/github.com/Luzifer/rconfig/general_test.go
new file mode 100644
index 0000000..d9ff8fe
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/general_test.go
@@ -0,0 +1,109 @@
+package rconfig
+
+import (
+ "os"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Testing general parsing", func() {
+ type t struct {
+ Test string `default:"foo" env:"shell" flag:"shell" description:"Test"`
+ Test2 string `default:"blub" env:"testvar" flag:"testvar,t" description:"Test"`
+ DefaultFlag string `default:"goo"`
+ SadFlag string
+ }
+
+ var (
+ err error
+ args []string
+ cfg t
+ )
+
+ Context("with defined arguments", func() {
+ BeforeEach(func() {
+ cfg = t{}
+ args = []string{
+ "--shell=test23",
+ "-t", "bla",
+ }
+ })
+
+ JustBeforeEach(func() {
+ err = parse(&cfg, args)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have parsed the expected values", func() {
+ Expect(cfg.Test).To(Equal("test23"))
+ Expect(cfg.Test2).To(Equal("bla"))
+ Expect(cfg.SadFlag).To(Equal(""))
+ Expect(cfg.DefaultFlag).To(Equal("goo"))
+ })
+ })
+
+ Context("with no arguments", func() {
+ BeforeEach(func() {
+ cfg = t{}
+ args = []string{}
+ })
+
+ JustBeforeEach(func() {
+ err = parse(&cfg, args)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have used the default value", func() {
+ Expect(cfg.Test).To(Equal("foo"))
+ })
+ })
+
+ Context("with no arguments and set env", func() {
+ BeforeEach(func() {
+ cfg = t{}
+ args = []string{}
+ os.Setenv("shell", "test546")
+ })
+
+ AfterEach(func() {
+ os.Unsetenv("shell")
+ })
+
+ JustBeforeEach(func() {
+ err = parse(&cfg, args)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have used the value from env", func() {
+ Expect(cfg.Test).To(Equal("test546"))
+ })
+ })
+
+ Context("with additional arguments", func() {
+ BeforeEach(func() {
+ cfg = t{}
+ args = []string{
+ "--shell=test23",
+ "-t", "bla",
+ "positional1", "positional2",
+ }
+ })
+
+ JustBeforeEach(func() {
+ err = parse(&cfg, args)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have parsed the expected values", func() {
+ Expect(cfg.Test).To(Equal("test23"))
+ Expect(cfg.Test2).To(Equal("bla"))
+ Expect(cfg.SadFlag).To(Equal(""))
+ Expect(cfg.DefaultFlag).To(Equal("goo"))
+ })
+ It("should have detected the positional arguments", func() {
+ Expect(Args()).To(Equal([]string{"positional1", "positional2"}))
+ })
+ })
+
+})
diff --git a/vendor/github.com/Luzifer/rconfig/int_test.go b/vendor/github.com/Luzifer/rconfig/int_test.go
new file mode 100644
index 0000000..2cc0022
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/int_test.go
@@ -0,0 +1,54 @@
+package rconfig
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Testing int parsing", func() {
+ type t struct {
+ Test int `flag:"int"`
+ TestP int `flag:"intp,i"`
+ Test8 int8 `flag:"int8"`
+ Test8P int8 `flag:"int8p,8"`
+ Test32 int32 `flag:"int32"`
+ Test32P int32 `flag:"int32p,3"`
+ Test64 int64 `flag:"int64"`
+ Test64P int64 `flag:"int64p,6"`
+ TestDef int8 `default:"66"`
+ }
+
+ var (
+ err error
+ args []string
+ cfg t
+ )
+
+ BeforeEach(func() {
+ cfg = t{}
+ args = []string{
+ "--int=1", "-i", "2",
+ "--int8=3", "-8", "4",
+ "--int32=5", "-3", "6",
+ "--int64=7", "-6", "8",
+ }
+ })
+
+ JustBeforeEach(func() {
+ err = parse(&cfg, args)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.Test).To(Equal(1))
+ Expect(cfg.TestP).To(Equal(2))
+ Expect(cfg.Test8).To(Equal(int8(3)))
+ Expect(cfg.Test8P).To(Equal(int8(4)))
+ Expect(cfg.Test32).To(Equal(int32(5)))
+ Expect(cfg.Test32P).To(Equal(int32(6)))
+ Expect(cfg.Test64).To(Equal(int64(7)))
+ Expect(cfg.Test64P).To(Equal(int64(8)))
+
+ Expect(cfg.TestDef).To(Equal(int8(66)))
+ })
+})
diff --git a/vendor/github.com/Luzifer/rconfig/os-args_test.go b/vendor/github.com/Luzifer/rconfig/os-args_test.go
new file mode 100644
index 0000000..eacee71
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/os-args_test.go
@@ -0,0 +1,40 @@
+package rconfig_test
+
+import (
+ "os"
+
+ . "github.com/Luzifer/rconfig"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Testing os.Args", func() {
+ type t struct {
+ A string `default:"a" flag:"a"`
+ }
+
+ var (
+ err error
+ cfg t
+ )
+
+ JustBeforeEach(func() {
+ err = Parse(&cfg)
+ })
+
+ Context("With only valid arguments", func() {
+
+ BeforeEach(func() {
+ cfg = t{}
+ os.Args = []string{"--a=bar"}
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.A).To(Equal("bar"))
+ })
+
+ })
+
+})
diff --git a/vendor/github.com/Luzifer/rconfig/precedence_test.go b/vendor/github.com/Luzifer/rconfig/precedence_test.go
new file mode 100644
index 0000000..6d87ca0
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/precedence_test.go
@@ -0,0 +1,87 @@
+package rconfig
+
+import (
+ "os"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Precedence", func() {
+
+ type t struct {
+ A int `default:"1" vardefault:"a" env:"a" flag:"avar,a" description:"a"`
+ }
+
+ var (
+ err error
+ cfg t
+ args []string
+ vardefaults map[string]string
+ )
+
+ JustBeforeEach(func() {
+ cfg = t{}
+ SetVariableDefaults(vardefaults)
+ err = parse(&cfg, args)
+ })
+
+ Context("Provided: Flag, Env, Default, VarDefault", func() {
+ BeforeEach(func() {
+ args = []string{"-a", "5"}
+ os.Setenv("a", "8")
+ vardefaults = map[string]string{
+ "a": "3",
+ }
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have used the flag value", func() {
+ Expect(cfg.A).To(Equal(5))
+ })
+ })
+
+ Context("Provided: Env, Default, VarDefault", func() {
+ BeforeEach(func() {
+ args = []string{}
+ os.Setenv("a", "8")
+ vardefaults = map[string]string{
+ "a": "3",
+ }
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have used the env value", func() {
+ Expect(cfg.A).To(Equal(8))
+ })
+ })
+
+ Context("Provided: Default, VarDefault", func() {
+ BeforeEach(func() {
+ args = []string{}
+ os.Unsetenv("a")
+ vardefaults = map[string]string{
+ "a": "3",
+ }
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have used the vardefault value", func() {
+ Expect(cfg.A).To(Equal(3))
+ })
+ })
+
+ Context("Provided: Default", func() {
+ BeforeEach(func() {
+ args = []string{}
+ os.Unsetenv("a")
+ vardefaults = map[string]string{}
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have used the default value", func() {
+ Expect(cfg.A).To(Equal(1))
+ })
+ })
+
+})
diff --git a/vendor/github.com/Luzifer/rconfig/rconfig_suite_test.go b/vendor/github.com/Luzifer/rconfig/rconfig_suite_test.go
new file mode 100644
index 0000000..72c9ce4
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/rconfig_suite_test.go
@@ -0,0 +1,13 @@
+package rconfig_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func TestRconfig(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Rconfig Suite")
+}
diff --git a/vendor/github.com/Luzifer/rconfig/slice_test.go b/vendor/github.com/Luzifer/rconfig/slice_test.go
new file mode 100644
index 0000000..7d9524e
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/slice_test.go
@@ -0,0 +1,51 @@
+package rconfig
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Testing slices", func() {
+ type t struct {
+ Int []int `default:"1,2,3" flag:"int"`
+ String []string `default:"a,b,c" flag:"string"`
+ IntP []int `default:"1,2,3" flag:"intp,i"`
+ StringP []string `default:"a,b,c" flag:"stringp,s"`
+ }
+
+ var (
+ err error
+ args []string
+ cfg t
+ )
+
+ BeforeEach(func() {
+ cfg = t{}
+ args = []string{
+ "--int=4,5", "-s", "hallo,welt",
+ }
+ })
+
+ JustBeforeEach(func() {
+ err = parse(&cfg, args)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values for int-slice", func() {
+ Expect(len(cfg.Int)).To(Equal(2))
+ Expect(cfg.Int).To(Equal([]int{4, 5}))
+ Expect(cfg.Int).NotTo(Equal([]int{5, 4}))
+ })
+ It("should have the expected values for int-shorthand-slice", func() {
+ Expect(len(cfg.IntP)).To(Equal(3))
+ Expect(cfg.IntP).To(Equal([]int{1, 2, 3}))
+ })
+ It("should have the expected values for string-slice", func() {
+ Expect(len(cfg.String)).To(Equal(3))
+ Expect(cfg.String).To(Equal([]string{"a", "b", "c"}))
+ })
+ It("should have the expected values for string-shorthand-slice", func() {
+ Expect(len(cfg.StringP)).To(Equal(2))
+ Expect(cfg.StringP).To(Equal([]string{"hallo", "welt"}))
+ })
+})
diff --git a/vendor/github.com/Luzifer/rconfig/sub-struct_test.go b/vendor/github.com/Luzifer/rconfig/sub-struct_test.go
new file mode 100644
index 0000000..cfbfbc2
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/sub-struct_test.go
@@ -0,0 +1,36 @@
+package rconfig
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Testing sub-structs", func() {
+ type t struct {
+ Test string `default:"blubb"`
+ Sub struct {
+ Test string `default:"Hallo"`
+ }
+ }
+
+ var (
+ err error
+ args []string
+ cfg t
+ )
+
+ BeforeEach(func() {
+ cfg = t{}
+ args = []string{}
+ })
+
+ JustBeforeEach(func() {
+ err = parse(&cfg, args)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.Test).To(Equal("blubb"))
+ Expect(cfg.Sub.Test).To(Equal("Hallo"))
+ })
+})
diff --git a/vendor/github.com/Luzifer/rconfig/uint_test.go b/vendor/github.com/Luzifer/rconfig/uint_test.go
new file mode 100644
index 0000000..886db1d
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/uint_test.go
@@ -0,0 +1,59 @@
+package rconfig
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Testing uint parsing", func() {
+ type t struct {
+ Test uint `flag:"int"`
+ TestP uint `flag:"intp,i"`
+ Test8 uint8 `flag:"int8"`
+ Test8P uint8 `flag:"int8p,8"`
+ Test16 uint16 `flag:"int16"`
+ Test16P uint16 `flag:"int16p,1"`
+ Test32 uint32 `flag:"int32"`
+ Test32P uint32 `flag:"int32p,3"`
+ Test64 uint64 `flag:"int64"`
+ Test64P uint64 `flag:"int64p,6"`
+ TestDef uint8 `default:"66"`
+ }
+
+ var (
+ err error
+ args []string
+ cfg t
+ )
+
+ BeforeEach(func() {
+ cfg = t{}
+ args = []string{
+ "--int=1", "-i", "2",
+ "--int8=3", "-8", "4",
+ "--int32=5", "-3", "6",
+ "--int64=7", "-6", "8",
+ "--int16=9", "-1", "10",
+ }
+ })
+
+ JustBeforeEach(func() {
+ err = parse(&cfg, args)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.Test).To(Equal(uint(1)))
+ Expect(cfg.TestP).To(Equal(uint(2)))
+ Expect(cfg.Test8).To(Equal(uint8(3)))
+ Expect(cfg.Test8P).To(Equal(uint8(4)))
+ Expect(cfg.Test32).To(Equal(uint32(5)))
+ Expect(cfg.Test32P).To(Equal(uint32(6)))
+ Expect(cfg.Test64).To(Equal(uint64(7)))
+ Expect(cfg.Test64P).To(Equal(uint64(8)))
+ Expect(cfg.Test16).To(Equal(uint16(9)))
+ Expect(cfg.Test16P).To(Equal(uint16(10)))
+
+ Expect(cfg.TestDef).To(Equal(uint8(66)))
+ })
+})
diff --git a/vendor/github.com/Luzifer/rconfig/vardefault_test.go b/vendor/github.com/Luzifer/rconfig/vardefault_test.go
new file mode 100644
index 0000000..8328919
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/vardefault_test.go
@@ -0,0 +1,122 @@
+package rconfig
+
+import (
+ "io/ioutil"
+ "os"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Testing variable defaults", func() {
+
+ type t struct {
+ MySecretValue string `default:"secret" env:"foo" vardefault:"my_secret_value"`
+ MyUsername string `default:"luzifer" vardefault:"username"`
+ SomeVar string `flag:"var" description:"some variable"`
+ IntVar int64 `vardefault:"int_var" default:"23"`
+ }
+
+ var (
+ err error
+ cfg t
+ args = []string{}
+ vardefaults = map[string]string{
+ "my_secret_value": "veryverysecretkey",
+ "unkownkey": "hi there",
+ "int_var": "42",
+ }
+ )
+
+ BeforeEach(func() {
+ cfg = t{}
+ })
+
+ JustBeforeEach(func() {
+ err = parse(&cfg, args)
+ })
+
+ Context("With manually provided variables", func() {
+ BeforeEach(func() {
+ SetVariableDefaults(vardefaults)
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.IntVar).To(Equal(int64(42)))
+ Expect(cfg.MySecretValue).To(Equal("veryverysecretkey"))
+ Expect(cfg.MyUsername).To(Equal("luzifer"))
+ Expect(cfg.SomeVar).To(Equal(""))
+ })
+ })
+
+ Context("With defaults from YAML data", func() {
+ BeforeEach(func() {
+ yamlData := []byte("---\nmy_secret_value: veryverysecretkey\nunknownkey: hi there\nint_var: 42\n")
+ SetVariableDefaults(VarDefaultsFromYAML(yamlData))
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.IntVar).To(Equal(int64(42)))
+ Expect(cfg.MySecretValue).To(Equal("veryverysecretkey"))
+ Expect(cfg.MyUsername).To(Equal("luzifer"))
+ Expect(cfg.SomeVar).To(Equal(""))
+ })
+ })
+
+ Context("With defaults from YAML file", func() {
+ var tmp *os.File
+
+ BeforeEach(func() {
+ tmp, _ = ioutil.TempFile("", "")
+ yamlData := "---\nmy_secret_value: veryverysecretkey\nunknownkey: hi there\nint_var: 42\n"
+ tmp.WriteString(yamlData)
+ SetVariableDefaults(VarDefaultsFromYAMLFile(tmp.Name()))
+ })
+
+ AfterEach(func() {
+ tmp.Close()
+ os.Remove(tmp.Name())
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.IntVar).To(Equal(int64(42)))
+ Expect(cfg.MySecretValue).To(Equal("veryverysecretkey"))
+ Expect(cfg.MyUsername).To(Equal("luzifer"))
+ Expect(cfg.SomeVar).To(Equal(""))
+ })
+ })
+
+ Context("With defaults from invalid YAML data", func() {
+ BeforeEach(func() {
+ yamlData := []byte("---\nmy_secret_value = veryverysecretkey\nunknownkey = hi there\nint_var = 42\n")
+ SetVariableDefaults(VarDefaultsFromYAML(yamlData))
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.IntVar).To(Equal(int64(23)))
+ Expect(cfg.MySecretValue).To(Equal("secret"))
+ Expect(cfg.MyUsername).To(Equal("luzifer"))
+ Expect(cfg.SomeVar).To(Equal(""))
+ })
+ })
+
+ Context("With defaults from non existent YAML file", func() {
+ BeforeEach(func() {
+ file := "/tmp/this_file_should_not_exist_146e26723r"
+ SetVariableDefaults(VarDefaultsFromYAMLFile(file))
+ })
+
+ It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
+ It("should have the expected values", func() {
+ Expect(cfg.IntVar).To(Equal(int64(23)))
+ Expect(cfg.MySecretValue).To(Equal("secret"))
+ Expect(cfg.MyUsername).To(Equal("luzifer"))
+ Expect(cfg.SomeVar).To(Equal(""))
+ })
+ })
+
+})
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit_test.go b/vendor/github.com/Sirupsen/logrus/alt_exit_test.go
new file mode 100644
index 0000000..022b778
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/alt_exit_test.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+ "io/ioutil"
+ "os/exec"
+ "testing"
+ "time"
+)
+
+func TestRegister(t *testing.T) {
+ current := len(handlers)
+ RegisterExitHandler(func() {})
+ if len(handlers) != current+1 {
+ t.Fatalf("can't add handler")
+ }
+}
+
+func TestHandler(t *testing.T) {
+ gofile := "/tmp/testprog.go"
+ if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil {
+ t.Fatalf("can't create go file")
+ }
+
+ outfile := "/tmp/testprog.out"
+ arg := time.Now().UTC().String()
+ err := exec.Command("go", "run", gofile, outfile, arg).Run()
+ if err == nil {
+ t.Fatalf("completed normally, should have failed")
+ }
+
+ data, err := ioutil.ReadFile(outfile)
+ if err != nil {
+ t.Fatalf("can't read output file %s", outfile)
+ }
+
+ if string(data) != arg {
+ t.Fatalf("bad data")
+ }
+}
+
+var testprog = []byte(`
+// Test program for atexit, gets output file and data as arguments and writes
+// data to output file in atexit handler.
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+ "flag"
+ "fmt"
+ "io/ioutil"
+)
+
+var outfile = ""
+var data = ""
+
+func handler() {
+ ioutil.WriteFile(outfile, []byte(data), 0666)
+}
+
+func badHandler() {
+ n := 0
+ fmt.Println(1/n)
+}
+
+func main() {
+ flag.Parse()
+ outfile = flag.Arg(0)
+ data = flag.Arg(1)
+
+ logrus.RegisterExitHandler(handler)
+ logrus.RegisterExitHandler(badHandler)
+ logrus.Fatal("Bye bye")
+}
+`)
diff --git a/vendor/github.com/Sirupsen/logrus/entry_test.go b/vendor/github.com/Sirupsen/logrus/entry_test.go
new file mode 100644
index 0000000..99c3b41
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/entry_test.go
@@ -0,0 +1,77 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEntryWithError(t *testing.T) {
+
+ assert := assert.New(t)
+
+ defer func() {
+ ErrorKey = "error"
+ }()
+
+ err := fmt.Errorf("kaboom at layer %d", 4711)
+
+ assert.Equal(err, WithError(err).Data["error"])
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+
+ assert.Equal(err, entry.WithError(err).Data["error"])
+
+ ErrorKey = "err"
+
+ assert.Equal(err, entry.WithError(err).Data["err"])
+
+}
+
+func TestEntryPanicln(t *testing.T) {
+ errBoom := fmt.Errorf("boom time")
+
+ defer func() {
+ p := recover()
+ assert.NotNil(t, p)
+
+ switch pVal := p.(type) {
+ case *Entry:
+ assert.Equal(t, "kaboom", pVal.Message)
+ assert.Equal(t, errBoom, pVal.Data["err"])
+ default:
+ t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+ }
+ }()
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+ entry.WithField("err", errBoom).Panicln("kaboom")
+}
+
+func TestEntryPanicf(t *testing.T) {
+ errBoom := fmt.Errorf("boom again")
+
+ defer func() {
+ p := recover()
+ assert.NotNil(t, p)
+
+ switch pVal := p.(type) {
+ case *Entry:
+ assert.Equal(t, "kaboom true", pVal.Message)
+ assert.Equal(t, errBoom, pVal.Data["err"])
+ default:
+ t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+ }
+ }()
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+ entry.WithField("err", errBoom).Panicf("kaboom %v", true)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go b/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go
new file mode 100644
index 0000000..a1623ec
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go
@@ -0,0 +1,50 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.JSONFormatter)
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Level = logrus.DebugLevel
+}
+
+func main() {
+ defer func() {
+ err := recover()
+ if err != nil {
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "err": err,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+ }
+ }()
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "number": 8,
+ }).Debug("Started observing beach")
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "temperature": -4,
+ }).Debug("Temperature changes")
+
+ log.WithFields(logrus.Fields{
+ "animal": "orca",
+ "size": 9009,
+ }).Panic("It's over 9000!")
+}
diff --git a/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go b/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go
new file mode 100644
index 0000000..3187f6d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go
@@ -0,0 +1,30 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
+}
+
+func main() {
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+}
diff --git a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go
new file mode 100644
index 0000000..c6d290c
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go
@@ -0,0 +1,98 @@
+package logrus
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
+// smallFields is a small size data set for benchmarking
+var smallFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+}
+
+// largeFields is a large size data set for benchmarking
+var largeFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+ "five": "six",
+ "seven": "eight",
+ "nine": "ten",
+ "eleven": "twelve",
+ "thirteen": "fourteen",
+ "fifteen": "sixteen",
+ "seventeen": "eighteen",
+ "nineteen": "twenty",
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ "i": "j",
+ "k": "l",
+ "m": "n",
+ "o": "p",
+ "q": "r",
+ "s": "t",
+ "u": "v",
+ "w": "x",
+ "y": "z",
+ "this": "will",
+ "make": "thirty",
+ "entries": "yeah",
+}
+
+var errorFields = Fields{
+ "foo": fmt.Errorf("bar"),
+ "baz": fmt.Errorf("qux"),
+}
+
+func BenchmarkErrorTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
+}
+
+func BenchmarkSmallTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func BenchmarkLargeTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
+}
+
+func BenchmarkSmallColoredTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
+}
+
+func BenchmarkLargeColoredTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
+}
+
+func BenchmarkSmallJSONFormatter(b *testing.B) {
+ doBenchmark(b, &JSONFormatter{}, smallFields)
+}
+
+func BenchmarkLargeJSONFormatter(b *testing.B) {
+ doBenchmark(b, &JSONFormatter{}, largeFields)
+}
+
+func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
+ entry := &Entry{
+ Time: time.Time{},
+ Level: InfoLevel,
+ Message: "message",
+ Data: fields,
+ }
+ var d []byte
+ var err error
+ for i := 0; i < b.N; i++ {
+ d, err = formatter.Format(entry)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(int64(len(d)))
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/hook_test.go b/vendor/github.com/Sirupsen/logrus/hook_test.go
new file mode 100644
index 0000000..13f34cb
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/hook_test.go
@@ -0,0 +1,122 @@
+package logrus
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type TestHook struct {
+ Fired bool
+}
+
+func (hook *TestHook) Fire(entry *Entry) error {
+ hook.Fired = true
+ return nil
+}
+
+func (hook *TestHook) Levels() []Level {
+ return []Level{
+ DebugLevel,
+ InfoLevel,
+ WarnLevel,
+ ErrorLevel,
+ FatalLevel,
+ PanicLevel,
+ }
+}
+
+func TestHookFires(t *testing.T) {
+ hook := new(TestHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ assert.Equal(t, hook.Fired, false)
+
+ log.Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, true)
+ })
+}
+
+type ModifyHook struct {
+}
+
+func (hook *ModifyHook) Fire(entry *Entry) error {
+ entry.Data["wow"] = "whale"
+ return nil
+}
+
+func (hook *ModifyHook) Levels() []Level {
+ return []Level{
+ DebugLevel,
+ InfoLevel,
+ WarnLevel,
+ ErrorLevel,
+ FatalLevel,
+ PanicLevel,
+ }
+}
+
+func TestHookCanModifyEntry(t *testing.T) {
+ hook := new(ModifyHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.WithField("wow", "elephant").Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["wow"], "whale")
+ })
+}
+
+func TestCanFireMultipleHooks(t *testing.T) {
+ hook1 := new(ModifyHook)
+ hook2 := new(TestHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook1)
+ log.Hooks.Add(hook2)
+
+ log.WithField("wow", "elephant").Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["wow"], "whale")
+ assert.Equal(t, hook2.Fired, true)
+ })
+}
+
+type ErrorHook struct {
+ Fired bool
+}
+
+func (hook *ErrorHook) Fire(entry *Entry) error {
+ hook.Fired = true
+ return nil
+}
+
+func (hook *ErrorHook) Levels() []Level {
+ return []Level{
+ ErrorLevel,
+ }
+}
+
+func TestErrorHookShouldntFireOnInfo(t *testing.T) {
+ hook := new(ErrorHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, false)
+ })
+}
+
+func TestErrorHookShouldFireOnError(t *testing.T) {
+ hook := new(ErrorHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.Error("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, true)
+ })
+}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md b/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md
new file mode 100644
index 0000000..066704b
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md
@@ -0,0 +1,39 @@
+# Syslog Hooks for Logrus
+
+## Usage
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
+
+If you want to connect to the local syslog daemon (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"), just assign an empty string to the first two parameters of `NewSyslogHook`. It should look like the following:
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
\ No newline at end of file
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
new file mode 100644
index 0000000..a36e200
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
@@ -0,0 +1,54 @@
+// +build !windows,!nacl,!plan9
+
+package logrus_syslog
+
+import (
+ "fmt"
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "os"
+)
+
+// SyslogHook to send logs via syslog.
+type SyslogHook struct {
+ Writer *syslog.Writer
+ SyslogNetwork string
+ SyslogRaddr string
+}
+
+// NewSyslogHook creates a hook to be added to an instance of logger. This is called with
+// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
+// `if err == nil { log.Hooks.Add(hook) }`
+func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
+ w, err := syslog.Dial(network, raddr, priority, tag)
+ return &SyslogHook{w, network, raddr}, err
+}
+
+func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
+ line, err := entry.String()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
+ return err
+ }
+
+ switch entry.Level {
+ case logrus.PanicLevel:
+ return hook.Writer.Crit(line)
+ case logrus.FatalLevel:
+ return hook.Writer.Crit(line)
+ case logrus.ErrorLevel:
+ return hook.Writer.Err(line)
+ case logrus.WarnLevel:
+ return hook.Writer.Warning(line)
+ case logrus.InfoLevel:
+ return hook.Writer.Info(line)
+ case logrus.DebugLevel:
+ return hook.Writer.Debug(line)
+ default:
+ return nil
+ }
+}
+
+func (hook *SyslogHook) Levels() []logrus.Level {
+ return logrus.AllLevels
+}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
new file mode 100644
index 0000000..42762dc
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
@@ -0,0 +1,26 @@
+package logrus_syslog
+
+import (
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "testing"
+)
+
+func TestLocalhostAddAndPrint(t *testing.T) {
+ log := logrus.New()
+ hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err != nil {
+ t.Errorf("Unable to connect to local syslog.")
+ }
+
+ log.Hooks.Add(hook)
+
+ for _, level := range hook.Levels() {
+ if len(log.Hooks[level]) != 1 {
+ t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
+ }
+ }
+
+ log.Info("Congratulations!")
+}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go
new file mode 100644
index 0000000..0688125
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go
@@ -0,0 +1,67 @@
+package test
+
+import (
+ "io/ioutil"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// test.Hook is a hook designed for dealing with logs in test scenarios.
+type Hook struct {
+ Entries []*logrus.Entry
+}
+
+// NewGlobal installs a test hook for the global logger.
+func NewGlobal() *Hook {
+
+ hook := new(Hook)
+ logrus.AddHook(hook)
+
+ return hook
+
+}
+
+// NewLocal installs a test hook for a given local logger.
+func NewLocal(logger *logrus.Logger) *Hook {
+
+ hook := new(Hook)
+ logger.Hooks.Add(hook)
+
+ return hook
+
+}
+
+// NewNullLogger creates a discarding logger and installs the test hook.
+func NewNullLogger() (*logrus.Logger, *Hook) {
+
+ logger := logrus.New()
+ logger.Out = ioutil.Discard
+
+ return logger, NewLocal(logger)
+
+}
+
+func (t *Hook) Fire(e *logrus.Entry) error {
+ t.Entries = append(t.Entries, e)
+ return nil
+}
+
+func (t *Hook) Levels() []logrus.Level {
+ return logrus.AllLevels
+}
+
+// LastEntry returns the last entry that was logged or nil.
+func (t *Hook) LastEntry() (l *logrus.Entry) {
+
+ if i := len(t.Entries) - 1; i >= 0 {
+ return t.Entries[i]
+ }
+
+ return nil
+
+}
+
+// Reset removes all Entries from this test hook.
+func (t *Hook) Reset() {
+ t.Entries = make([]*logrus.Entry, 0)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go
new file mode 100644
index 0000000..d69455b
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go
@@ -0,0 +1,39 @@
+package test
+
+import (
+ "testing"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAllHooks(t *testing.T) {
+
+ assert := assert.New(t)
+
+ logger, hook := NewNullLogger()
+ assert.Nil(hook.LastEntry())
+ assert.Equal(0, len(hook.Entries))
+
+ logger.Error("Hello error")
+ assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal("Hello error", hook.LastEntry().Message)
+ assert.Equal(1, len(hook.Entries))
+
+ logger.Warn("Hello warning")
+ assert.Equal(logrus.WarnLevel, hook.LastEntry().Level)
+ assert.Equal("Hello warning", hook.LastEntry().Message)
+ assert.Equal(2, len(hook.Entries))
+
+ hook.Reset()
+ assert.Nil(hook.LastEntry())
+ assert.Equal(0, len(hook.Entries))
+
+ hook = NewGlobal()
+
+ logrus.Error("Hello error")
+ assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal("Hello error", hook.LastEntry().Message)
+ assert.Equal(1, len(hook.Entries))
+
+}
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go
new file mode 100644
index 0000000..1d70873
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go
@@ -0,0 +1,120 @@
+package logrus
+
+import (
+ "encoding/json"
+ "errors"
+
+ "testing"
+)
+
+func TestErrorNotLost(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["error"] != "wild walrus" {
+ t.Fatal("Error field not set")
+ }
+}
+
+func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["omg"] != "wild walrus" {
+ t.Fatal("Error field not set")
+ }
+}
+
+func TestFieldClashWithTime(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("time", "right now!"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["fields.time"] != "right now!" {
+ t.Fatal("fields.time not set to original time field")
+ }
+
+ if entry["time"] != "0001-01-01T00:00:00Z" {
+ t.Fatal("time field not set to current time, was: ", entry["time"])
+ }
+}
+
+func TestFieldClashWithMsg(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("msg", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["fields.msg"] != "something" {
+ t.Fatal("fields.msg not set to original msg field")
+ }
+}
+
+func TestFieldClashWithLevel(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["fields.level"] != "something" {
+ t.Fatal("fields.level not set to original level field")
+ }
+}
+
+func TestJSONEntryEndsWithNewline(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ if b[len(b)-1] != '\n' {
+ t.Fatal("Expected JSON log entry to end with a newline")
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logger_bench_test.go b/vendor/github.com/Sirupsen/logrus/logger_bench_test.go
new file mode 100644
index 0000000..dd23a35
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logger_bench_test.go
@@ -0,0 +1,61 @@
+package logrus
+
+import (
+ "os"
+ "testing"
+)
+
+// loggerFields is a small size data set for benchmarking
+var loggerFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+}
+
+func BenchmarkDummyLogger(b *testing.B) {
+ nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666)
+ if err != nil {
+ b.Fatalf("%v", err)
+ }
+ defer nullf.Close()
+ doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func BenchmarkDummyLoggerNoLock(b *testing.B) {
+ nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666)
+ if err != nil {
+ b.Fatalf("%v", err)
+ }
+ defer nullf.Close()
+ doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
+ logger := Logger{
+ Out: out,
+ Level: InfoLevel,
+ Formatter: formatter,
+ }
+ entry := logger.WithFields(fields)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ entry.Info("aaa")
+ }
+ })
+}
+
+func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
+ logger := Logger{
+ Out: out,
+ Level: InfoLevel,
+ Formatter: formatter,
+ }
+ logger.SetNoLock()
+ entry := logger.WithFields(fields)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ entry.Info("aaa")
+ }
+ })
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/Sirupsen/logrus/logrus_test.go
new file mode 100644
index 0000000..bfc4780
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logrus_test.go
@@ -0,0 +1,361 @@
+package logrus
+
+import (
+ "bytes"
+ "encoding/json"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ log(logger)
+
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ assertions(fields)
+}
+
+func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
+ var buffer bytes.Buffer
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = &TextFormatter{
+ DisableColors: true,
+ }
+
+ log(logger)
+
+ fields := make(map[string]string)
+ for _, kv := range strings.Split(buffer.String(), " ") {
+ if !strings.Contains(kv, "=") {
+ continue
+ }
+ kvArr := strings.Split(kv, "=")
+ key := strings.TrimSpace(kvArr[0])
+ val := kvArr[1]
+ if kvArr[1][0] == '"' {
+ var err error
+ val, err = strconv.Unquote(val)
+ assert.NoError(t, err)
+ }
+ fields[key] = val
+ }
+ assertions(fields)
+}
+
+func TestPrint(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "info")
+ })
+}
+
+func TestInfo(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "info")
+ })
+}
+
+func TestWarn(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Warn("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "warning")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln("test", "test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test test")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln("test", 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test 10")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln(10, 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "10 10")
+ })
+}
+
+func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info(10, 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "10 10")
+ })
+}
+
+func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test", 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test10")
+ })
+}
+
+func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test", "test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "testtest")
+ })
+}
+
+func TestWithFieldsShouldAllowAssignments(t *testing.T) {
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ localLog := logger.WithFields(Fields{
+ "key1": "value1",
+ })
+
+ localLog.WithField("key2", "value2").Info("test")
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ assert.Equal(t, "value2", fields["key2"])
+ assert.Equal(t, "value1", fields["key1"])
+
+ buffer = bytes.Buffer{}
+ fields = Fields{}
+ localLog.Info("test")
+ err = json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ _, ok := fields["key2"]
+ assert.Equal(t, false, ok)
+ assert.Equal(t, "value1", fields["key1"])
+}
+
+func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("msg", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ })
+}
+
+func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("msg", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["fields.msg"], "hello")
+ })
+}
+
+func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("time", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["fields.time"], "hello")
+ })
+}
+
+func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("level", 1).Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["level"], "info")
+ assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
+ })
+}
+
+func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
+ LogAndAssertText(t, func(log *Logger) {
+ ll := log.WithField("herp", "derp")
+ ll.Info("hello")
+ ll.Info("bye")
+ }, func(fields map[string]string) {
+ for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
+ if _, ok := fields[fieldName]; ok {
+ t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
+ }
+ }
+ })
+}
+
+func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
+
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ llog := logger.WithField("context", "eating raw fish")
+
+ llog.Info("looks delicious")
+
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.NoError(t, err, "should have decoded first message")
+ assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
+ assert.Equal(t, fields["msg"], "looks delicious")
+ assert.Equal(t, fields["context"], "eating raw fish")
+
+ buffer.Reset()
+
+ llog.Warn("omg it is!")
+
+ err = json.Unmarshal(buffer.Bytes(), &fields)
+ assert.NoError(t, err, "should have decoded second message")
+ assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
+ assert.Equal(t, fields["msg"], "omg it is!")
+ assert.Equal(t, fields["context"], "eating raw fish")
+ assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
+
+}
+
+func TestConvertLevelToString(t *testing.T) {
+ assert.Equal(t, "debug", DebugLevel.String())
+ assert.Equal(t, "info", InfoLevel.String())
+ assert.Equal(t, "warning", WarnLevel.String())
+ assert.Equal(t, "error", ErrorLevel.String())
+ assert.Equal(t, "fatal", FatalLevel.String())
+ assert.Equal(t, "panic", PanicLevel.String())
+}
+
+func TestParseLevel(t *testing.T) {
+ l, err := ParseLevel("panic")
+ assert.Nil(t, err)
+ assert.Equal(t, PanicLevel, l)
+
+ l, err = ParseLevel("PANIC")
+ assert.Nil(t, err)
+ assert.Equal(t, PanicLevel, l)
+
+ l, err = ParseLevel("fatal")
+ assert.Nil(t, err)
+ assert.Equal(t, FatalLevel, l)
+
+ l, err = ParseLevel("FATAL")
+ assert.Nil(t, err)
+ assert.Equal(t, FatalLevel, l)
+
+ l, err = ParseLevel("error")
+ assert.Nil(t, err)
+ assert.Equal(t, ErrorLevel, l)
+
+ l, err = ParseLevel("ERROR")
+ assert.Nil(t, err)
+ assert.Equal(t, ErrorLevel, l)
+
+ l, err = ParseLevel("warn")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("WARN")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("warning")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("WARNING")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("info")
+ assert.Nil(t, err)
+ assert.Equal(t, InfoLevel, l)
+
+ l, err = ParseLevel("INFO")
+ assert.Nil(t, err)
+ assert.Equal(t, InfoLevel, l)
+
+ l, err = ParseLevel("debug")
+ assert.Nil(t, err)
+ assert.Equal(t, DebugLevel, l)
+
+ l, err = ParseLevel("DEBUG")
+ assert.Nil(t, err)
+ assert.Equal(t, DebugLevel, l)
+
+ l, err = ParseLevel("invalid")
+ assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
+}
+
+func TestGetSetLevelRace(t *testing.T) {
+ wg := sync.WaitGroup{}
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ if i%2 == 0 {
+ SetLevel(InfoLevel)
+ } else {
+ GetLevel()
+ }
+ }(i)
+
+ }
+ wg.Wait()
+}
+
+func TestLoggingRace(t *testing.T) {
+ logger := New()
+
+ var wg sync.WaitGroup
+ wg.Add(100)
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ logger.Info("info")
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+// Compile test
+func TestLogrusInterface(t *testing.T) {
+ var buffer bytes.Buffer
+ fn := func(l FieldLogger) {
+ b := l.WithField("key", "value")
+ b.Debug("Test")
+ }
+ // test logger
+ logger := New()
+ logger.Out = &buffer
+ fn(logger)
+
+ // test Entry
+ e := logger.WithField("another", "value")
+ fn(e)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go
new file mode 100644
index 0000000..e25a44f
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go
@@ -0,0 +1,61 @@
+package logrus
+
+import (
+ "bytes"
+ "errors"
+ "testing"
+ "time"
+)
+
+func TestQuoting(t *testing.T) {
+ tf := &TextFormatter{DisableColors: true}
+
+ checkQuoting := func(q bool, value interface{}) {
+ b, _ := tf.Format(WithField("test", value))
+ idx := bytes.Index(b, ([]byte)("test="))
+ cont := bytes.Contains(b[idx+5:], []byte{'"'})
+ if cont != q {
+ if q {
+ t.Errorf("quoting expected for: %#v", value)
+ } else {
+ t.Errorf("quoting not expected for: %#v", value)
+ }
+ }
+ }
+
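+ // Judging from the expectations below, values are quoted whenever they
+ // contain characters other than letters, digits, dots and dashes.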
+ checkQuoting(false, "abcd")
+ checkQuoting(false, "v1.0")
+ checkQuoting(false, "1234567890")
+ checkQuoting(true, "/foobar")
+ checkQuoting(true, "x y")
+ checkQuoting(true, "x,y")
+ checkQuoting(false, errors.New("invalid"))
+ checkQuoting(true, errors.New("invalid argument"))
+}
+
+func TestTimestampFormat(t *testing.T) {
+ checkTimeStr := func(format string) {
+ customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
+ customStr, _ := customFormatter.Format(WithField("test", "test"))
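+ // The timestamp sits between "time=" and " level=" in the logfmt output;
+ // surrounding quotes, if any, are stripped before parsing.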
+ timeStart := bytes.Index(customStr, ([]byte)("time="))
+ timeEnd := bytes.Index(customStr, ([]byte)("level="))
+ timeStr := customStr[timeStart+5 : timeEnd-1]
+ if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
+ timeStr = timeStr[1 : len(timeStr)-1]
+ }
+ if format == "" {
+ format = time.RFC3339
+ }
+ _, e := time.Parse(format, (string)(timeStr))
+ if e != nil {
+ t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
+ }
+ }
+
+ checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
+ checkTimeStr("Mon Jan _2 15:04:05 2006")
+ checkTimeStr("")
+}
+
+// TODO add tests for sorting etc., this requires a parser for the text
+// formatter output.
diff --git a/vendor/github.com/fatih/structs/field_test.go b/vendor/github.com/fatih/structs/field_test.go
new file mode 100644
index 0000000..b77e951
--- /dev/null
+++ b/vendor/github.com/fatih/structs/field_test.go
@@ -0,0 +1,383 @@
+package structs
+
+import (
+ "reflect"
+ "testing"
+)
+
+// A test struct that defines all cases
+type Foo struct {
+ A string
+ B int `structs:"y"`
+ C bool `json:"c"`
+ d string // not exported
+ E *Baz
+ x string `xml:"x"` // not exported, with tag
+ Y []string
+ Z map[string]interface{}
+ *Bar // embedded
+}
+
+type Baz struct {
+ A string
+ B int
+}
+
+type Bar struct {
+ E string
+ F int
+ g []string
+}
+
+func newStruct() *Struct {
+ b := &Bar{
+ E: "example",
+ F: 2,
+ g: []string{"zeynep", "fatih"},
+ }
+
+ // B and x are left uninitialized for testing
+ f := &Foo{
+ A: "gopher",
+ C: true,
+ d: "small",
+ E: nil,
+ Y: []string{"example"},
+ Z: nil,
+ }
+ f.Bar = b
+
+ return New(f)
+}
+
+func TestField_Set(t *testing.T) {
+ s := newStruct()
+
+ f := s.Field("A")
+ err := f.Set("fatih")
+ if err != nil {
+ t.Error(err)
+ }
+
+ if f.Value().(string) != "fatih" {
+ t.Errorf("Set value is wrong: %s want: %s", f.Value().(string), "fatih")
+ }
+
+ f = s.Field("Y")
+ err = f.Set([]string{"override", "with", "this"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ sliceLen := len(f.Value().([]string))
+ if sliceLen != 3 {
+ t.Errorf("Set values slice length is wrong: %d, want: %d", sliceLen, 3)
+ }
+
+ f = s.Field("C")
+ err = f.Set(false)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if f.Value().(bool) {
+ t.Errorf("Set value is wrong: %t want: %t", f.Value().(bool), false)
+ }
+
+ // let's pass a different type
+ f = s.Field("A")
+ err = f.Set(123) // Field A is of type string, but we are going to pass an integer
+ if err == nil {
+ t.Error("Setting a field's value with a different type than the field's type should return an error")
+ }
+
+ // old value should be still there :)
+ if f.Value().(string) != "fatih" {
+ t.Errorf("Set value is wrong: %s want: %s", f.Value().(string), "fatih")
+ }
+
+ // let's access an unexported field, which should give an error
+ f = s.Field("d")
+ err = f.Set("large")
+ if err != errNotExported {
+ t.Error(err)
+ }
+
+ // let's set a pointer to struct
+ b := &Bar{
+ E: "gopher",
+ F: 2,
+ }
+
+ f = s.Field("Bar")
+ err = f.Set(b)
+ if err != nil {
+ t.Error(err)
+ }
+
+ baz := &Baz{
+ A: "helloWorld",
+ B: 42,
+ }
+
+ f = s.Field("E")
+ err = f.Set(baz)
+ if err != nil {
+ t.Error(err)
+ }
+
+ ba := s.Field("E").Value().(*Baz)
+
+ if ba.A != "helloWorld" {
+ t.Errorf("could not set baz. Got: %s Want: helloWorld", ba.A)
+ }
+}
+
+func TestField_Zero(t *testing.T) {
+ s := newStruct()
+
+ f := s.Field("A")
+ err := f.Zero()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if f.Value().(string) != "" {
+ t.Errorf("Zeroed value is wrong: %s want: %s", f.Value().(string), "")
+ }
+
+ f = s.Field("Y")
+ err = f.Zero()
+ if err != nil {
+ t.Error(err)
+ }
+
+ sliceLen := len(f.Value().([]string))
+ if sliceLen != 0 {
+ t.Errorf("Zeroed values slice length is wrong: %d, want: %d", sliceLen, 0)
+ }
+
+ f = s.Field("C")
+ err = f.Zero()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if f.Value().(bool) {
+ t.Errorf("Zeroed value is wrong: %t want: %t", f.Value().(bool), false)
+ }
+
+ // let's access an unexported field, which should give an error
+ f = s.Field("d")
+ err = f.Zero()
+ if err != errNotExported {
+ t.Error(err)
+ }
+
+ f = s.Field("Bar")
+ err = f.Zero()
+ if err != nil {
+ t.Error(err)
+ }
+
+ f = s.Field("E")
+ err = f.Zero()
+ if err != nil {
+ t.Error(err)
+ }
+
+ v := s.Field("E").value
+ if !v.IsNil() {
+ t.Errorf("Field E should be nil after Zero, got: %v", v.Interface())
+ }
+}
+
+func TestField(t *testing.T) {
+ s := newStruct()
+
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Error("Retrieving a non-existing field from the struct should panic")
+ }
+ }()
+
+ _ = s.Field("no-field")
+}
+
+func TestField_Kind(t *testing.T) {
+ s := newStruct()
+
+ f := s.Field("A")
+ if f.Kind() != reflect.String {
+ t.Errorf("Field A has wrong kind: %s want: %s", f.Kind(), reflect.String)
+ }
+
+ f = s.Field("B")
+ if f.Kind() != reflect.Int {
+ t.Errorf("Field B has wrong kind: %s want: %s", f.Kind(), reflect.Int)
+ }
+
+ // unexported
+ f = s.Field("d")
+ if f.Kind() != reflect.String {
+ t.Errorf("Field d has wrong kind: %s want: %s", f.Kind(), reflect.String)
+ }
+}
+
+func TestField_Tag(t *testing.T) {
+ s := newStruct()
+
+ v := s.Field("B").Tag("json")
+ if v != "" {
+ t.Errorf("Field's tag value of a non-existing tag should return empty, got: %s", v)
+ }
+
+ v = s.Field("C").Tag("json")
+ if v != "c" {
+ t.Errorf("Field's tag value of the existing field C should return 'c', got: %s", v)
+ }
+
+ v = s.Field("d").Tag("json")
+ if v != "" {
+ t.Errorf("Field's tag value of an unexported field should return empty, got: %s", v)
+ }
+
+ v = s.Field("x").Tag("xml")
+ if v != "x" {
+ t.Errorf("Field's tag value of an unexported field with a tag should return 'x', got: %s", v)
+ }
+
+ v = s.Field("A").Tag("json")
+ if v != "" {
+ t.Errorf("Field's tag value of an existing field without a tag should return empty, got: %s", v)
+ }
+}
+
+func TestField_Value(t *testing.T) {
+ s := newStruct()
+
+ v := s.Field("A").Value()
+ val, ok := v.(string)
+ if !ok {
+ t.Error("Field A's value should be a string")
+ }
+
+ if val != "gopher" {
+ t.Errorf("Field A's value should be 'gopher', got: %s", val)
+ }
+
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Error("Value of an unexported field should panic")
+ }
+ }()
+
+ // should panic
+ _ = s.Field("d").Value()
+}
+
+func TestField_IsEmbedded(t *testing.T) {
+ s := newStruct()
+
+ if !s.Field("Bar").IsEmbedded() {
+ t.Error("Field 'Bar' should be an embedded field")
+ }
+
+ if s.Field("d").IsEmbedded() {
+ t.Error("Field 'd' should not be an embedded field")
+ }
+}
+
+func TestField_IsExported(t *testing.T) {
+ s := newStruct()
+
+ if !s.Field("Bar").IsExported() {
+ t.Error("Field 'Bar' should be an exported field")
+ }
+
+ if !s.Field("A").IsExported() {
+ t.Error("Field 'A' should be an exported field")
+ }
+
+ if s.Field("d").IsExported() {
+ t.Error("Field 'd' should not be an exported field")
+ }
+}
+
+func TestField_IsZero(t *testing.T) {
+ s := newStruct()
+
+ if s.Field("A").IsZero() {
+ t.Error("Field 'A' should not be zero; it is initialized")
+ }
+
+ if !s.Field("B").IsZero() {
+ t.Error("Field 'B' should be zero; it is not initialized")
+ }
+}
+
+func TestField_Name(t *testing.T) {
+ s := newStruct()
+
+ if s.Field("A").Name() != "A" {
+ t.Error("Field 'A' should have the name 'A'")
+ }
+}
+
+func TestField_Field(t *testing.T) {
+ s := newStruct()
+
+ e := s.Field("Bar").Field("E")
+
+ val, ok := e.Value().(string)
+ if !ok {
+ t.Error("The value of the field 'E' inside 'Bar' struct should be a string")
+ }
+
+ if val != "example" {
+ t.Errorf("The value of 'E' should be 'example', got: %s", val)
+ }
+
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Error("Accessing a non-existing field of a nested struct should panic")
+ }
+ }()
+
+ _ = s.Field("Bar").Field("e")
+}
+
+func TestField_Fields(t *testing.T) {
+ s := newStruct()
+ fields := s.Field("Bar").Fields()
+
+ if len(fields) != 3 {
+ t.Errorf("We expect 3 fields in embedded struct, was: %d", len(fields))
+ }
+}
+
+func TestField_FieldOk(t *testing.T) {
+ s := newStruct()
+
+ b, ok := s.FieldOk("Bar")
+ if !ok {
+ t.Error("The field 'Bar' should exist")
+ }
+
+ e, ok := b.FieldOk("E")
+ if !ok {
+ t.Error("The field 'E' should exist")
+ }
+
+ val, ok := e.Value().(string)
+ if !ok {
+ t.Error("The value of the field 'E' inside 'Bar' struct should be a string")
+ }
+
+ if val != "example" {
+ t.Errorf("The value of 'E' should be 'example', got: %s", val)
+ }
+}
diff --git a/vendor/github.com/fatih/structs/structs_example_test.go b/vendor/github.com/fatih/structs/structs_example_test.go
new file mode 100644
index 0000000..32bb829
--- /dev/null
+++ b/vendor/github.com/fatih/structs/structs_example_test.go
@@ -0,0 +1,351 @@
+package structs
+
+import (
+ "fmt"
+ "time"
+)
+
+func ExampleNew() {
+ type Server struct {
+ Name string
+ ID int32
+ Enabled bool
+ }
+
+ server := &Server{
+ Name: "Arslan",
+ ID: 123456,
+ Enabled: true,
+ }
+
+ s := New(server)
+
+ fmt.Printf("Name : %v\n", s.Name())
+ fmt.Printf("Values : %v\n", s.Values())
+ fmt.Printf("Value of ID : %v\n", s.Field("ID").Value())
+ // Output:
+ // Name : Server
+ // Values : [Arslan 123456 true]
+ // Value of ID : 123456
+
+}
+
+func ExampleMap() {
+ type Server struct {
+ Name string
+ ID int32
+ Enabled bool
+ }
+
+ s := &Server{
+ Name: "Arslan",
+ ID: 123456,
+ Enabled: true,
+ }
+
+ m := Map(s)
+
+ fmt.Printf("%#v\n", m["Name"])
+ fmt.Printf("%#v\n", m["ID"])
+ fmt.Printf("%#v\n", m["Enabled"])
+ // Output:
+ // "Arslan"
+ // 123456
+ // true
+
+}
+
+func ExampleMap_tags() {
+ // Custom tags can change the map keys instead of using the fields name
+ type Server struct {
+ Name string `structs:"server_name"`
+ ID int32 `structs:"server_id"`
+ Enabled bool `structs:"enabled"`
+ }
+
+ s := &Server{
+ Name: "Zeynep",
+ ID: 789012,
+ }
+
+ m := Map(s)
+
+ // access them by the custom tags defined above
+ fmt.Printf("%#v\n", m["server_name"])
+ fmt.Printf("%#v\n", m["server_id"])
+ fmt.Printf("%#v\n", m["enabled"])
+ // Output:
+ // "Zeynep"
+ // 789012
+ // false
+
+}
+
+func ExampleMap_nested() {
+ // By default field with struct types are processed too. We can stop
+ // processing them via "omitnested" tag option.
+ type Server struct {
+ Name string `structs:"server_name"`
+ ID int32 `structs:"server_id"`
+ Time time.Time `structs:"time,omitnested"` // do not convert to map[string]interface{}
+ }
+
+ const shortForm = "2006-Jan-02"
+ t, _ := time.Parse(shortForm, "2013-Feb-03")
+
+ s := &Server{
+ Name: "Zeynep",
+ ID: 789012,
+ Time: t,
+ }
+
+ m := Map(s)
+
+ // access them by the custom tags defined above
+ fmt.Printf("%v\n", m["server_name"])
+ fmt.Printf("%v\n", m["server_id"])
+ fmt.Printf("%v\n", m["time"].(time.Time))
+ // Output:
+ // Zeynep
+ // 789012
+ // 2013-02-03 00:00:00 +0000 UTC
+}
+
+func ExampleMap_omitEmpty() {
+ // By default field with struct types of zero values are processed too. We
+ // can stop processing them via "omitempty" tag option.
+ type Server struct {
+ Name string `structs:",omitempty"`
+ ID int32 `structs:"server_id,omitempty"`
+ Location string
+ }
+
+ // Only add location
+ s := &Server{
+ Location: "Tokyo",
+ }
+
+ m := Map(s)
+
+ // map contains only the Location field
+ fmt.Printf("%v\n", m)
+ // Output:
+ // map[Location:Tokyo]
+}
+
+func ExampleValues() {
+ type Server struct {
+ Name string
+ ID int32
+ Enabled bool
+ }
+
+ s := &Server{
+ Name: "Fatih",
+ ID: 135790,
+ Enabled: false,
+ }
+
+ m := Values(s)
+
+ fmt.Printf("Values: %+v\n", m)
+ // Output:
+ // Values: [Fatih 135790 false]
+}
+
+func ExampleValues_omitEmpty() {
+ // By default field with struct types of zero values are processed too. We
+ // can stop processing them via "omitempty" tag option.
+ type Server struct {
+ Name string `structs:",omitempty"`
+ ID int32 `structs:"server_id,omitempty"`
+ Location string
+ }
+
+ // Only add location
+ s := &Server{
+ Location: "Ankara",
+ }
+
+ m := Values(s)
+
+ // values contains only the Location field
+ fmt.Printf("Values: %+v\n", m)
+ // Output:
+ // Values: [Ankara]
+}
+
+func ExampleValues_tags() {
+ type Location struct {
+ City string
+ Country string
+ }
+
+ type Server struct {
+ Name string
+ ID int32
+ Enabled bool
+ Location Location `structs:"-"` // values from location are not included anymore
+ }
+
+ s := &Server{
+ Name: "Fatih",
+ ID: 135790,
+ Enabled: false,
+ Location: Location{City: "Ankara", Country: "Turkey"},
+ }
+
+ // Let's get all values from the struct s. Note that values from
+ // the Location field are not included.
+ m := Values(s)
+
+ fmt.Printf("Values: %+v\n", m)
+ // Output:
+ // Values: [Fatih 135790 false]
+}
+
+func ExampleFields() {
+ type Access struct {
+ Name string
+ LastAccessed time.Time
+ Number int
+ }
+
+ s := &Access{
+ Name: "Fatih",
+ LastAccessed: time.Now(),
+ Number: 1234567,
+ }
+
+ fields := Fields(s)
+
+ for i, field := range fields {
+ fmt.Printf("[%d] %+v\n", i, field.Name())
+ }
+
+ // Output:
+ // [0] Name
+ // [1] LastAccessed
+ // [2] Number
+}
+
+func ExampleFields_nested() {
+ type Person struct {
+ Name string
+ Number int
+ }
+
+ type Access struct {
+ Person Person
+ HasPermission bool
+ LastAccessed time.Time
+ }
+
+ s := &Access{
+ Person: Person{Name: "fatih", Number: 1234567},
+ LastAccessed: time.Now(),
+ HasPermission: true,
+ }
+
+ // Let's get all fields from the struct s.
+ fields := Fields(s)
+
+ for _, field := range fields {
+ if field.Name() == "Person" {
+ fmt.Printf("Access.Person.Name: %+v\n", field.Field("Name").Value())
+ }
+ }
+
+ // Output:
+ // Access.Person.Name: fatih
+}
+
+func ExampleField() {
+ type Person struct {
+ Name string
+ Number int
+ }
+
+ type Access struct {
+ Person Person
+ HasPermission bool
+ LastAccessed time.Time
+ }
+
+ access := &Access{
+ Person: Person{Name: "fatih", Number: 1234567},
+ LastAccessed: time.Now(),
+ HasPermission: true,
+ }
+
+ // Create a new Struct type
+ s := New(access)
+
+ // Get the Field type for "Person" field
+ p := s.Field("Person")
+
+ // Get the underlying "Name field" and print the value of it
+ name := p.Field("Name")
+
+ fmt.Printf("Value of Person.Access.Name: %+v\n", name.Value())
+
+ // Output:
+ // Value of Person.Access.Name: fatih
+
+}
+
+func ExampleIsZero() {
+ type Server struct {
+ Name string
+ ID int32
+ Enabled bool
+ }
+
+ // Nothing is initialized
+ a := &Server{}
+ isZeroA := IsZero(a)
+
+ // Name and Enabled are initialized, but not ID
+ b := &Server{
+ Name: "Golang",
+ Enabled: true,
+ }
+ isZeroB := IsZero(b)
+
+ fmt.Printf("%#v\n", isZeroA)
+ fmt.Printf("%#v\n", isZeroB)
+ // Output:
+ // true
+ // false
+}
+
+func ExampleHasZero() {
+ // Let's define an Access struct. Note that the "Enabled" field is not
+ // going to be checked because it is tagged with structs:"-".
+ type Access struct {
+ Name string
+ LastAccessed time.Time
+ Number int
+ Enabled bool `structs:"-"`
+ }
+
+ // Name and Number are not initialized.
+ a := &Access{
+ LastAccessed: time.Now(),
+ }
+ hasZeroA := HasZero(a)
+
+ // Name and Number are initialized.
+ b := &Access{
+ Name: "Fatih",
+ LastAccessed: time.Now(),
+ Number: 12345,
+ }
+ hasZeroB := HasZero(b)
+
+ fmt.Printf("%#v\n", hasZeroA)
+ fmt.Printf("%#v\n", hasZeroB)
+ // Output:
+ // true
+ // false
+}
diff --git a/vendor/github.com/fatih/structs/structs_test.go b/vendor/github.com/fatih/structs/structs_test.go
new file mode 100644
index 0000000..b1b05a1
--- /dev/null
+++ b/vendor/github.com/fatih/structs/structs_test.go
@@ -0,0 +1,1109 @@
+package structs
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestMapNonStruct(t *testing.T) {
+ foo := []string{"foo"}
+
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Error("Passing a non struct into Map should panic")
+ }
+ }()
+
+ // this should panic. We are going to recover and test it
+ _ = Map(foo)
+}
+
+func TestStructIndexes(t *testing.T) {
+ type C struct {
+ something int
+ Props map[string]interface{}
+ }
+
+ defer func() {
+ err := recover()
+ if err != nil {
+ fmt.Printf("err %+v\n", err)
+ t.Error("Using mixed indexes should not panic")
+ }
+ }()
+
+ // They should not panic
+ _ = Map(&C{})
+ _ = Fields(&C{})
+ _ = Values(&C{})
+ _ = IsZero(&C{})
+ _ = HasZero(&C{})
+}
+
+func TestMap(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+
+ a := Map(T)
+
+ if typ := reflect.TypeOf(a).Kind(); typ != reflect.Map {
+ t.Errorf("Map should return a map type, got: %v", typ)
+ }
+
+ // we have three fields
+ if len(a) != 3 {
+ t.Errorf("Map should return a map of len 3, got: %d", len(a))
+ }
+
+ inMap := func(val interface{}) bool {
+ for _, v := range a {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+
+ return false
+ }
+
+ for _, val := range []interface{}{"a-value", 2, true} {
+ if !inMap(val) {
+ t.Errorf("Map should have the value %v", val)
+ }
+ }
+
+}
+
+func TestMap_Tag(t *testing.T) {
+ var T = struct {
+ A string `structs:"x"`
+ B int `structs:"y"`
+ C bool `structs:"z"`
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+
+ a := Map(T)
+
+ inMap := func(key interface{}) bool {
+ for k := range a {
+ if reflect.DeepEqual(k, key) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, key := range []string{"x", "y", "z"} {
+ if !inMap(key) {
+ t.Errorf("Map should have the key %v", key)
+ }
+ }
+
+}
+
+func TestMap_CustomTag(t *testing.T) {
+ var T = struct {
+ A string `json:"x"`
+ B int `json:"y"`
+ C bool `json:"z"`
+ D struct {
+ E string `json:"jkl"`
+ } `json:"nested"`
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+ T.D.E = "e-value"
+
+ s := New(T)
+ s.TagName = "json"
+
+ a := s.Map()
+
+ inMap := func(key interface{}) bool {
+ for k := range a {
+ if reflect.DeepEqual(k, key) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, key := range []string{"x", "y", "z"} {
+ if !inMap(key) {
+ t.Errorf("Map should have the key %v", key)
+ }
+ }
+
+ nested, ok := a["nested"].(map[string]interface{})
+ if !ok {
+ t.Fatalf("Map should contain the D field that is tagged as 'nested'")
+ }
+
+ e, ok := nested["jkl"].(string)
+ if !ok {
+ t.Fatalf("Map should contain the D.E field that is tagged as 'jkl'")
+ }
+
+ if e != "e-value" {
+ t.Errorf("D.E field should be equal to 'e-value', got: '%v'", e)
+ }
+
+}
+
+func TestMap_MultipleCustomTag(t *testing.T) {
+ var A = struct {
+ X string `aa:"ax"`
+ }{"a_value"}
+
+ aStruct := New(A)
+ aStruct.TagName = "aa"
+
+ var B = struct {
+ X string `bb:"bx"`
+ }{"b_value"}
+
+ bStruct := New(B)
+ bStruct.TagName = "bb"
+
+ a, b := aStruct.Map(), bStruct.Map()
+ if !reflect.DeepEqual(a, map[string]interface{}{"ax": "a_value"}) {
+ t.Error("Map should have field ax with value a_value")
+ }
+
+ if !reflect.DeepEqual(b, map[string]interface{}{"bx": "b_value"}) {
+ t.Error("Map should have field bx with value b_value")
+ }
+}
+
+func TestMap_OmitEmpty(t *testing.T) {
+ type A struct {
+ Name string
+ Value string `structs:",omitempty"`
+ Time time.Time `structs:",omitempty"`
+ }
+ a := A{}
+
+ m := Map(a)
+
+ _, ok := m["Value"]
+ if ok {
+ t.Error("Map should not contain the Value field that is tagged as omitempty")
+ }
+
+ _, ok = m["Time"]
+ if ok {
+ t.Error("Map should not contain the Time field that is tagged as omitempty")
+ }
+}
+
+func TestMap_OmitNested(t *testing.T) {
+ type A struct {
+ Name string
+ Value string
+ Time time.Time `structs:",omitnested"`
+ }
+ a := A{Time: time.Now()}
+
+ type B struct {
+ Desc string
+ A A
+ }
+ b := &B{A: a}
+
+ m := Map(b)
+
+ in, ok := m["A"].(map[string]interface{})
+ if !ok {
+ t.Error("Nested struct is not available in the map")
+ }
+
+ // should not happen
+ if _, ok := in["Time"].(map[string]interface{}); ok {
+ t.Error("Map nested struct should omit recursive parsing of Time")
+ }
+
+ if _, ok := in["Time"].(time.Time); !ok {
+ t.Error("Map nested struct should stop parsing of Time at its current value")
+ }
+}
+
+func TestMap_Nested(t *testing.T) {
+ type A struct {
+ Name string
+ }
+ a := &A{Name: "example"}
+
+ type B struct {
+ A *A
+ }
+ b := &B{A: a}
+
+ m := Map(b)
+
+ if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
+ t.Errorf("Map should return a map type, got: %v", typ)
+ }
+
+ in, ok := m["A"].(map[string]interface{})
+ if !ok {
+ t.Error("Nested struct is not available in the map")
+ }
+
+ if name := in["Name"].(string); name != "example" {
+ t.Errorf("Nested struct's Name field should give example, got: %s", name)
+ }
+}
+
+func TestMap_Anonymous(t *testing.T) {
+ type A struct {
+ Name string
+ }
+ a := &A{Name: "example"}
+
+ type B struct {
+ *A
+ }
+ b := &B{}
+ b.A = a
+
+ m := Map(b)
+
+ if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
+ t.Errorf("Map should return a map type, got: %v", typ)
+ }
+
+ in, ok := m["A"].(map[string]interface{})
+ if !ok {
+ t.Error("Embedded struct is not available in the map")
+ }
+
+ if name := in["Name"].(string); name != "example" {
+ t.Errorf("Embedded A struct's Name field should give example, got: %s", name)
+ }
+}
+
+func TestMap_Flatnested(t *testing.T) {
+ type A struct {
+ Name string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A `structs:",flatten"`
+ C int
+ }
+ b := &B{C: 123}
+ b.A = a
+
+ m := Map(b)
+
+ _, ok := m["A"].(map[string]interface{})
+ if ok {
+ t.Error("Embedded A struct with tag flatten has to be flat in the map")
+ }
+
+ expectedMap := map[string]interface{}{"Name": "example", "C": 123}
+ if !reflect.DeepEqual(m, expectedMap) {
+ t.Errorf("The expected map %+v doesn't correspond to %+v", expectedMap, m)
+ }
+
+}
+
+func TestMap_FlatnestedOverwrite(t *testing.T) {
+ type A struct {
+ Name string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A `structs:",flatten"`
+ Name string
+ C int
+ }
+ b := &B{C: 123, Name: "bName"}
+ b.A = a
+
+ m := Map(b)
+
+ _, ok := m["A"].(map[string]interface{})
+ if ok {
+ t.Error("Embedded A struct with tag flatten has to be flat in the map")
+ }
+
+ expectedMap := map[string]interface{}{"Name": "bName", "C": 123}
+ if !reflect.DeepEqual(m, expectedMap) {
+ t.Errorf("The expected map %+v doesn't correspond to %+v", expectedMap, m)
+ }
+}
+
+func TestMap_TimeField(t *testing.T) {
+ type A struct {
+ CreatedAt time.Time
+ }
+
+ a := &A{CreatedAt: time.Now().UTC()}
+ m := Map(a)
+
+ _, ok := m["CreatedAt"].(time.Time)
+ if !ok {
+ t.Error("Time field must be final")
+ }
+}
+
+func TestFillMap(t *testing.T) {
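+ // FillMap is the variant of Map that writes the struct's fields into a
+ // caller-supplied map instead of allocating a new one.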
+ var T = struct {
+ A string
+ B int
+ C bool
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+
+ a := make(map[string]interface{})
+ FillMap(T, a)
+
+ // we have three fields
+ if len(a) != 3 {
+ t.Errorf("FillMap should fill a map of len 3, got: %d", len(a))
+ }
+
+ inMap := func(val interface{}) bool {
+ for _, v := range a {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+
+ return false
+ }
+
+ for _, val := range []interface{}{"a-value", 2, true} {
+ if !inMap(val) {
+ t.Errorf("FillMap should have the value %v", val)
+ }
+ }
+}
+
+func TestFillMap_Nil(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+
+ defer func() {
+ err := recover()
+ if err != nil {
+ t.Error("FillMap should not panic if a nil map is passed")
+ }
+ }()
+
+ // a nil map must not cause a panic
+ FillMap(T, nil)
+}
+
+func TestStruct(t *testing.T) {
+ var T = struct{}{}
+
+ if !IsStruct(T) {
+ t.Errorf("T should be a struct, got: %T", T)
+ }
+
+ if !IsStruct(&T) {
+ t.Errorf("T should be a struct, got: %T", T)
+ }
+
+}
+
+func TestValues(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+
+ s := Values(T)
+
+ if typ := reflect.TypeOf(s).Kind(); typ != reflect.Slice {
+ t.Errorf("Values should return a slice type, got: %v", typ)
+ }
+
+ inSlice := func(val interface{}) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []interface{}{"a-value", 2, true} {
+ if !inSlice(val) {
+ t.Errorf("Values should have the value %v", val)
+ }
+ }
+}
+
+func TestValues_OmitEmpty(t *testing.T) {
+ type A struct {
+ Name string
+ Value int `structs:",omitempty"`
+ }
+
+ a := A{Name: "example"}
+ s := Values(a)
+
+ if len(s) != 1 {
+ t.Error("Values of omitted empty fields should not be counted")
+ }
+
+ if s[0].(string) != "example" {
+ t.Error("Values of omitted empty fields should leave the value 'example'")
+ }
+}
+
+func TestValues_OmitNested(t *testing.T) {
+ type A struct {
+ Name string
+ Value int
+ }
+
+ a := A{
+ Name: "example",
+ Value: 123,
+ }
+
+ type B struct {
+ A A `structs:",omitnested"`
+ C int
+ }
+ b := &B{A: a, C: 123}
+
+ s := Values(b)
+
+ if len(s) != 2 {
+ t.Error("Values of omitted nested struct should not be counted")
+ }
+
+ inSlice := func(val interface{}) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []interface{}{123, a} {
+ if !inSlice(val) {
+ t.Errorf("Values should have the value %v", val)
+ }
+ }
+}
+
+func TestValues_Nested(t *testing.T) {
+ type A struct {
+ Name string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A A
+ C int
+ }
+ b := &B{A: a, C: 123}
+
+ s := Values(b)
+
+ inSlice := func(val interface{}) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []interface{}{"example", 123} {
+ if !inSlice(val) {
+ t.Errorf("Values should have the value %v", val)
+ }
+ }
+}
+
+func TestValues_Anonymous(t *testing.T) {
+ type A struct {
+ Name string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A
+ C int
+ }
+ b := &B{C: 123}
+ b.A = a
+
+ s := Values(b)
+
+ inSlice := func(val interface{}) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []interface{}{"example", 123} {
+ if !inSlice(val) {
+ t.Errorf("Values should have the value %v", val)
+ }
+ }
+}
+
+func TestNames(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+
+ s := Names(T)
+
+ if len(s) != 3 {
+ t.Errorf("Names should return a slice of len 3, got: %d", len(s))
+ }
+
+ inSlice := func(val string) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []string{"A", "B", "C"} {
+ if !inSlice(val) {
+ t.Errorf("Names should have the value %v", val)
+ }
+ }
+}
+
+func TestFields(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+
+ s := Fields(T)
+
+ if len(s) != 3 {
+ t.Errorf("Fields should return a slice of len 3, got: %d", len(s))
+ }
+
+ inSlice := func(val string) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v.Name(), val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []string{"A", "B", "C"} {
+ if !inSlice(val) {
+ t.Errorf("Fields should have the value %v", val)
+ }
+ }
+}
+
+func TestFields_OmitNested(t *testing.T) {
+ type A struct {
+ Name string
+ Enabled bool
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A A
+ C int
+ Value string `structs:"-"`
+ Number int
+ }
+ b := &B{A: a, C: 123}
+
+ s := Fields(b)
+
+ if len(s) != 3 {
+ t.Errorf("Fields should omit the field tagged with '-'. Expecting 3, got: %d", len(s))
+ }
+
+ inSlice := func(val interface{}) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v.Name(), val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []interface{}{"A", "C"} {
+ if !inSlice(val) {
+ t.Errorf("Fields should have the value %v", val)
+ }
+ }
+}
+
+func TestFields_Anonymous(t *testing.T) {
+ type A struct {
+ Name string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A
+ C int
+ }
+ b := &B{C: 123}
+ b.A = a
+
+ s := Fields(b)
+
+ inSlice := func(val interface{}) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v.Name(), val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []interface{}{"A", "C"} {
+ if !inSlice(val) {
+ t.Errorf("Fields should have the value %v", val)
+ }
+ }
+}
+
+func TestIsZero(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool `structs:"-"`
+ D []string
+ }{}
+
+ ok := IsZero(T)
+ if !ok {
+ t.Error("IsZero should return true because none of the fields are initialized.")
+ }
+
+ var X = struct {
+ A string
+ F *bool
+ }{
+ A: "a-value",
+ }
+
+ ok = IsZero(X)
+ if ok {
+ t.Error("IsZero should return false because A is initialized")
+ }
+
+ var Y = struct {
+ A string
+ B int
+ }{
+ A: "a-value",
+ B: 123,
+ }
+
+ ok = IsZero(Y)
+ if ok {
+ t.Error("IsZero should return false because A and B are initialized")
+ }
+}
+
+func TestIsZero_OmitNested(t *testing.T) {
+ type A struct {
+ Name string
+ D string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A A `structs:",omitnested"`
+ C int
+ }
+ b := &B{A: a, C: 123}
+
+ ok := IsZero(b)
+ if ok {
+ t.Error("IsZero should return false because A and C are initialized")
+ }
+
+ aZero := A{}
+ bZero := &B{A: aZero}
+
+ ok = IsZero(bZero)
+ if !ok {
+ t.Error("IsZero should return true because neither A nor B is initialized")
+ }
+
+}
+
+func TestIsZero_Nested(t *testing.T) {
+ type A struct {
+ Name string
+ D string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A A
+ C int
+ }
+ b := &B{A: a, C: 123}
+
+ ok := IsZero(b)
+ if ok {
+ t.Error("IsZero should return false because A and C are initialized")
+ }
+
+ aZero := A{}
+ bZero := &B{A: aZero}
+
+ ok = IsZero(bZero)
+ if !ok {
+ t.Error("IsZero should return true because neither A nor B is initialized")
+ }
+
+}
+
+func TestIsZero_Anonymous(t *testing.T) {
+ type A struct {
+ Name string
+ D string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A
+ C int
+ }
+ b := &B{C: 123}
+ b.A = a
+
+ ok := IsZero(b)
+ if ok {
+ t.Error("IsZero should return false because A and C are initialized")
+ }
+
+ aZero := A{}
+ bZero := &B{}
+ bZero.A = aZero
+
+ ok = IsZero(bZero)
+ if !ok {
+ t.Error("IsZero should return true because neither A nor B is initialized")
+ }
+}
+
+func TestHasZero(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool `structs:"-"`
+ D []string
+ }{
+ A: "a-value",
+ B: 2,
+ }
+
+ ok := HasZero(T)
+ if !ok {
+ t.Error("HasZero should return true because D is not initialized")
+ }
+
+ var X = struct {
+ A string
+ F *bool
+ }{
+ A: "a-value",
+ }
+
+ ok = HasZero(X)
+ if !ok {
+ t.Error("HasZero should return true because F is not initialized")
+ }
+
+ var Y = struct {
+ A string
+ B int
+ }{
+ A: "a-value",
+ B: 123,
+ }
+
+ ok = HasZero(Y)
+ if ok {
+ t.Error("HasZero should return false because A and B are initialized")
+ }
+}
+
+func TestHasZero_OmitNested(t *testing.T) {
+ type A struct {
+ Name string
+ D string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A A `structs:",omitnested"`
+ C int
+ }
+ b := &B{A: a, C: 123}
+
+ // Because the field A inside B is tagged omitnested, HasZero stops
+ // iterating deeper and never looks at D, so it should return false.
+ ok := HasZero(b)
+ if ok {
+ t.Error("HasZero should return false because A and C are initialized")
+ }
+}
+
+func TestHasZero_Nested(t *testing.T) {
+ type A struct {
+ Name string
+ D string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A A
+ C int
+ }
+ b := &B{A: a, C: 123}
+
+ ok := HasZero(b)
+ if !ok {
+ t.Error("HasZero should return true because D is not initialized")
+ }
+}
+
+func TestHasZero_Anonymous(t *testing.T) {
+ type A struct {
+ Name string
+ D string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A
+ C int
+ }
+ b := &B{C: 123}
+ b.A = a
+
+ ok := HasZero(b)
+ if !ok {
+ t.Error("HasZero should return true because D is not initialized")
+ }
+}
+
+func TestName(t *testing.T) {
+ type Foo struct {
+ A string
+ B bool
+ }
+ f := &Foo{}
+
+ n := Name(f)
+ if n != "Foo" {
+ t.Errorf("Name should return Foo, got: %s", n)
+ }
+
+ unnamed := struct{ Name string }{Name: "Cihangir"}
+ m := Name(unnamed)
+ if m != "" {
+ t.Errorf("Name should return empty string for unnamed struct, got: %s", m)
+ }
+
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Error("Name should panic if a non struct is passed")
+ }
+ }()
+
+ Name([]string{})
+}
+
+func TestNestedNilPointer(t *testing.T) {
+ type Collar struct {
+ Engraving string
+ }
+
+ type Dog struct {
+ Name string
+ Collar *Collar
+ }
+
+ type Person struct {
+ Name string
+ Dog *Dog
+ }
+
+ person := &Person{
+ Name: "John",
+ }
+
+ personWithDog := &Person{
+ Name: "Ron",
+ Dog: &Dog{
+ Name: "Rover",
+ },
+ }
+
+ personWithDogWithCollar := &Person{
+ Name: "Kon",
+ Dog: &Dog{
+ Name: "Ruffles",
+ Collar: &Collar{
+ Engraving: "If lost, call Kon",
+ },
+ },
+ }
+
+ defer func() {
+ err := recover()
+ if err != nil {
+ fmt.Printf("err %+v\n", err)
+ t.Error("Internal nil pointer should not panic")
+ }
+ }()
+
+ _ = Map(person) // must not panic on the nil Dog pointer
+ _ = Map(personWithDog) // must not panic on the nil Collar pointer
+ _ = Map(personWithDogWithCollar) // no nil pointers involved
+}
+
+type Person struct {
+ Name string
+ Age int
+}
+
+func (p *Person) String() string {
+ return fmt.Sprintf("%s(%d)", p.Name, p.Age)
+}
+
+func TestTagWithStringOption(t *testing.T) {
+
+ type Address struct {
+ Country string `json:"country"`
+ Person *Person `json:"person,string"`
+ }
+
+ person := &Person{
+ Name: "John",
+ Age: 23,
+ }
+
+ address := &Address{
+ Country: "EU",
+ Person: person,
+ }
+
+ defer func() {
+ err := recover()
+ if err != nil {
+ fmt.Printf("err %+v\n", err)
+ t.Error("Map with string tag option should not panic")
+ }
+ }()
+
+ s := New(address)
+
+ s.TagName = "json"
+ m := s.Map()
+
+ if m["person"] != person.String() {
+ t.Errorf("Value for field person should be %s, got: %s", person.String(), m["person"])
+ }
+
+ vs := s.Values()
+ if vs[1] != person.String() {
+ t.Errorf("Value for 2nd field (person) should be %s, got: %v", person.String(), vs[1])
+ }
+}
+
+type Animal struct {
+ Name string
+ Age int
+}
+
+type Dog struct {
+ Animal *Animal `json:"animal,string"`
+}
+
+func TestNonStringerTagWithStringOption(t *testing.T) {
+ a := &Animal{
+ Name: "Fluff",
+ Age: 4,
+ }
+
+ d := &Dog{
+ Animal: a,
+ }
+
+ defer func() {
+ err := recover()
+ if err != nil {
+ fmt.Printf("err %+v\n", err)
+ t.Error("Map with string tag option on a non-Stringer should not panic")
+ }
+ }()
+
+ s := New(d)
+
+ s.TagName = "json"
+ m := s.Map()
+
+ if _, exists := m["animal"]; exists {
+ t.Error("Field 'animal' should not be present in the map")
+ }
+}
diff --git a/vendor/github.com/fatih/structs/tags_test.go b/vendor/github.com/fatih/structs/tags_test.go
new file mode 100644
index 0000000..5d12724
--- /dev/null
+++ b/vendor/github.com/fatih/structs/tags_test.go
@@ -0,0 +1,46 @@
+package structs
+
+import "testing"
+
+func TestParseTag_Name(t *testing.T) {
+ tags := []struct {
+ tag string
+ has bool
+ }{
+ {"", false},
+ {"name", true},
+ {"name,opt", true},
+ {"name , opt, opt2", false}, // has a single whitespace
+ {", opt, opt2", false},
+ }
+
+ for _, tag := range tags {
+ name, _ := parseTag(tag.tag)
+
+ if (name != "name") && tag.has {
+ t.Errorf("Parse tag should return name: %#v", tag)
+ }
+ }
+}
+
+func TestParseTag_Opts(t *testing.T) {
+ tags := []struct {
+ opts string
+ has bool
+ }{
+ {"name", false},
+ {"name,opt", true},
+ {"name , opt, opt2", false}, // has a single whitespace
+ {",opt, opt2", true},
+ {", opt3, opt4", false},
+ }
+
+ // search for "opt"
+ for _, tag := range tags {
+ _, opts := parseTag(tag.opts)
+
+ if opts.Has("opt") != tag.has {
+ t.Errorf("Tag opts should have opt: %#v", tag)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap_test.go b/vendor/github.com/hashicorp/errwrap/errwrap_test.go
new file mode 100644
index 0000000..5ae5f8e
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap_test.go
@@ -0,0 +1,94 @@
+package errwrap
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestWrappedError_impl(t *testing.T) {
+ var _ error = new(wrappedError)
+}
+
+func TestGetAll(t *testing.T) {
+ cases := []struct {
+ Err error
+ Msg string
+ Len int
+ }{
+ {},
+ {
+ fmt.Errorf("foo"),
+ "foo",
+ 1,
+ },
+ {
+ fmt.Errorf("bar"),
+ "foo",
+ 0,
+ },
+ {
+ Wrapf("bar", fmt.Errorf("foo")),
+ "foo",
+ 1,
+ },
+ {
+ Wrapf("{{err}}", fmt.Errorf("foo")),
+ "foo",
+ 2,
+ },
+ {
+ Wrapf("bar", Wrapf("baz", fmt.Errorf("foo"))),
+ "foo",
+ 1,
+ },
+ }
+
+ for i, tc := range cases {
+ actual := GetAll(tc.Err, tc.Msg)
+ if len(actual) != tc.Len {
+ t.Fatalf("%d: bad: %#v", i, actual)
+ }
+ for _, v := range actual {
+ if v.Error() != tc.Msg {
+ t.Fatalf("%d: bad: %#v", i, actual)
+ }
+ }
+ }
+}
+
+func TestGetAllType(t *testing.T) {
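+ // Unlike GetAll, which matches wrapped errors by message, GetAllType
+ // matches them by concrete type.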
+ cases := []struct {
+ Err error
+ Type interface{}
+ Len int
+ }{
+ {},
+ {
+ fmt.Errorf("foo"),
+ "foo",
+ 0,
+ },
+ {
+ fmt.Errorf("bar"),
+ fmt.Errorf("foo"),
+ 1,
+ },
+ {
+ Wrapf("bar", fmt.Errorf("foo")),
+ fmt.Errorf("baz"),
+ 2,
+ },
+ {
+ Wrapf("bar", Wrapf("baz", fmt.Errorf("foo"))),
+ Wrapf("", nil),
+ 0,
+ },
+ }
+
+ for i, tc := range cases {
+ actual := GetAllType(tc.Err, tc.Type)
+ if len(actual) != tc.Len {
+ t.Fatalf("%d: bad: %#v", i, actual)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/append_test.go b/vendor/github.com/hashicorp/go-multierror/append_test.go
new file mode 100644
index 0000000..dfa79e2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/append_test.go
@@ -0,0 +1,64 @@
+package multierror
+
+import (
+ "errors"
+ "testing"
+)
+
+func TestAppend_Error(t *testing.T) {
+ original := &Error{
+ Errors: []error{errors.New("foo")},
+ }
+
+ result := Append(original, errors.New("bar"))
+ if len(result.Errors) != 2 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+
+ original = &Error{}
+ result = Append(original, errors.New("bar"))
+ if len(result.Errors) != 1 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+
+ // Test when a typed nil is passed
+ var e *Error
+ result = Append(e, errors.New("baz"))
+ if len(result.Errors) != 1 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+
+ // Test flattening
+ original = &Error{
+ Errors: []error{errors.New("foo")},
+ }
+
+ result = Append(original, Append(nil, errors.New("foo"), errors.New("bar")))
+ if len(result.Errors) != 3 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+}
+
+func TestAppend_NilError(t *testing.T) {
+ var err error
+ result := Append(err, errors.New("bar"))
+ if len(result.Errors) != 1 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+}
+
+func TestAppend_NonError(t *testing.T) {
+ original := errors.New("foo")
+ result := Append(original, errors.New("bar"))
+ if len(result.Errors) != 2 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+}
+
+func TestAppend_NonError_Error(t *testing.T) {
+ original := errors.New("foo")
+ result := Append(original, Append(nil, errors.New("bar")))
+ if len(result.Errors) != 2 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten_test.go b/vendor/github.com/hashicorp/go-multierror/flatten_test.go
new file mode 100644
index 0000000..75218f1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten_test.go
@@ -0,0 +1,48 @@
+package multierror
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestFlatten(t *testing.T) {
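+ // Flatten collapses arbitrarily nested *Error values into a single flat
+ // list, so all three errors below surface at the top level.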
+ original := &Error{
+ Errors: []error{
+ errors.New("one"),
+ &Error{
+ Errors: []error{
+ errors.New("two"),
+ &Error{
+ Errors: []error{
+ errors.New("three"),
+ },
+ },
+ },
+ },
+ },
+ }
+
+ expected := strings.TrimSpace(`
+3 error(s) occurred:
+
+* one
+* two
+* three
+ `)
+ actual := fmt.Sprintf("%s", Flatten(original))
+
+ if expected != actual {
+ t.Fatalf("expected: %s, got: %s", expected, actual)
+ }
+}
+
+func TestFlatten_nonError(t *testing.T) {
+ err := errors.New("foo")
+ actual := Flatten(err)
+ if !reflect.DeepEqual(actual, err) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/format_test.go b/vendor/github.com/hashicorp/go-multierror/format_test.go
new file mode 100644
index 0000000..d7cee5d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format_test.go
@@ -0,0 +1,23 @@
+package multierror
+
+import (
+ "errors"
+ "testing"
+)
+
+func TestListFormatFunc(t *testing.T) {
+ expected := `2 error(s) occurred:
+
+* foo
+* bar`
+
+ errors := []error{
+ errors.New("foo"),
+ errors.New("bar"),
+ }
+
+ actual := ListFormatFunc(errors)
+ if actual != expected {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror_test.go b/vendor/github.com/hashicorp/go-multierror/multierror_test.go
new file mode 100644
index 0000000..3e78079
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/multierror_test.go
@@ -0,0 +1,70 @@
+package multierror
+
+import (
+ "errors"
+ "reflect"
+ "testing"
+)
+
+func TestError_Impl(t *testing.T) {
+ var _ error = new(Error)
+}
+
+func TestErrorError_custom(t *testing.T) {
+ errors := []error{
+ errors.New("foo"),
+ errors.New("bar"),
+ }
+
+ fn := func(es []error) string {
+ return "foo"
+ }
+
+ multi := &Error{Errors: errors, ErrorFormat: fn}
+ if multi.Error() != "foo" {
+ t.Fatalf("bad: %s", multi.Error())
+ }
+}
+
+func TestErrorError_default(t *testing.T) {
+ expected := `2 error(s) occurred:
+
+* foo
+* bar`
+
+ errors := []error{
+ errors.New("foo"),
+ errors.New("bar"),
+ }
+
+ multi := &Error{Errors: errors}
+ if multi.Error() != expected {
+ t.Fatalf("bad: %s", multi.Error())
+ }
+}
+
+func TestErrorErrorOrNil(t *testing.T) {
+ err := new(Error)
+ if err.ErrorOrNil() != nil {
+ t.Fatalf("bad: %#v", err.ErrorOrNil())
+ }
+
+ err.Errors = []error{errors.New("foo")}
+ if v := err.ErrorOrNil(); v == nil {
+ t.Fatal("should not be nil")
+ } else if !reflect.DeepEqual(v, err) {
+ t.Fatalf("bad: %#v", v)
+ }
+}
+
+func TestErrorWrappedErrors(t *testing.T) {
+ errors := []error{
+ errors.New("foo"),
+ errors.New("bar"),
+ }
+
+ multi := &Error{Errors: errors}
+ if !reflect.DeepEqual(multi.Errors, multi.WrappedErrors()) {
+ t.Fatalf("bad: %s", multi.WrappedErrors())
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix_test.go b/vendor/github.com/hashicorp/go-multierror/prefix_test.go
new file mode 100644
index 0000000..1d4a6f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/prefix_test.go
@@ -0,0 +1,33 @@
+package multierror
+
+import (
+ "errors"
+ "testing"
+)
+
+func TestPrefix_Error(t *testing.T) {
+ original := &Error{
+ Errors: []error{errors.New("foo")},
+ }
+
+ result := Prefix(original, "bar")
+ if result.(*Error).Errors[0].Error() != "bar foo" {
+ t.Fatalf("bad: %s", result)
+ }
+}
+
+func TestPrefix_NilError(t *testing.T) {
+ var err error
+ result := Prefix(err, "bar")
+ if result != nil {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestPrefix_NonError(t *testing.T) {
+ original := errors.New("foo")
+ result := Prefix(original, "bar")
+ if result.Error() != "bar foo" {
+ t.Fatalf("bad: %s", result)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin_test.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin_test.go
new file mode 100644
index 0000000..2129c15
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin_test.go
@@ -0,0 +1,17 @@
+package rootcerts
+
+import "testing"
+
+func TestSystemCAsOnDarwin(t *testing.T) {
+ _, err := LoadSystemCAs()
+ if err != nil {
+ t.Fatalf("Got error: %s", err)
+ }
+}
+
+func TestCertKeychains(t *testing.T) {
+ keychains := certKeychains()
+ if len(keychains) != 3 {
+ t.Fatalf("Expected 3 keychains, got %#v", keychains)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_test.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_test.go
new file mode 100644
index 0000000..9634385
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_test.go
@@ -0,0 +1,52 @@
+package rootcerts
+
+import (
+ "path/filepath"
+ "testing"
+)
+
+const fixturesDir = "./test-fixtures"
+
+func TestConfigureTLSHandlesNil(t *testing.T) {
+ err := ConfigureTLS(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestLoadCACertsHandlesNil(t *testing.T) {
+ _, err := LoadCACerts(nil)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestLoadCACertsFromFile(t *testing.T) {
+ path := testFixture("cafile", "cacert.pem")
+ _, err := LoadCACerts(&Config{CAFile: path})
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestLoadCACertsFromDir(t *testing.T) {
+ path := testFixture("capath")
+ _, err := LoadCACerts(&Config{CAPath: path})
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestLoadCACertsFromDirWithSymlinks(t *testing.T) {
+ path := testFixture("capath-with-symlinks")
+ _, err := LoadCACerts(&Config{CAPath: path})
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func testFixture(n ...string) string {
+ parts := []string{fixturesDir}
+ parts = append(parts, n...)
+ return filepath.Join(parts...)
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/cafile/cacert.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/cafile/cacert.pem
new file mode 100644
index 0000000..86d732f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/cafile/cacert.pem
@@ -0,0 +1,28 @@
+-----BEGIN CERTIFICATE-----
+MIIExDCCA6ygAwIBAgIJAJ7PV+3kJZqZMA0GCSqGSIb3DQEBBQUAMIGcMQswCQYD
+VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xEjAQ
+BgNVBAoTCUhhc2hpQ29ycDEUMBIGA1UECxMLRW5naW5lZXJpbmcxGzAZBgNVBAMU
+EiouYXRsYXMucGhpbnplLmNvbTEhMB8GCSqGSIb3DQEJARYScGF1bEBoYXNoaWNv
+cnAuY29tMB4XDTE2MDQyNzE1MjYyMVoXDTE3MDQyNzE1MjYyMVowgZwxCzAJBgNV
+BAYTAlVTMREwDwYDVQQIEwhJbGxpbm9pczEQMA4GA1UEBxMHQ2hpY2FnbzESMBAG
+A1UEChMJSGFzaGlDb3JwMRQwEgYDVQQLEwtFbmdpbmVlcmluZzEbMBkGA1UEAxQS
+Ki5hdGxhcy5waGluemUuY29tMSEwHwYJKoZIhvcNAQkBFhJwYXVsQGhhc2hpY29y
+cC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDWRXdMnsTpxpwZ
+D2olsun9WO7SnMQ/SIR3DV/fttPIDHSQm2ad4r2pKEuiV+TKEFUgj/Id9bCAfQYs
+jsa1qX1GmieXz+83OnK3MDEcDczpjGhSplTYGOjlxKLMhMBAOtdV5hJAYz3nwV3c
+R+IQu/4213+em40shZAQRNZ2apnyE3+QB+gPlEs9Nw0OcbSKLmAiuKPbJpO+94ou
+n1h0/w/+DPz6yO/fFPoA3vlisGM6B4R9U2JVwWjXrU71fU1i82ulFQdApdfUs1FP
+wRrZxgX5ldUrRvFr8lJiMehdX8khO7Ue4rT6yxbI6KVM04Q5mNt1ARRLI69rN9My
+pGXiItcxAgMBAAGjggEFMIIBATAdBgNVHQ4EFgQUjwsj8l0Y9HFQLH0GaJAsOHof
+PhwwgdEGA1UdIwSByTCBxoAUjwsj8l0Y9HFQLH0GaJAsOHofPhyhgaKkgZ8wgZwx
+CzAJBgNVBAYTAlVTMREwDwYDVQQIEwhJbGxpbm9pczEQMA4GA1UEBxMHQ2hpY2Fn
+bzESMBAGA1UEChMJSGFzaGlDb3JwMRQwEgYDVQQLEwtFbmdpbmVlcmluZzEbMBkG
+A1UEAxQSKi5hdGxhcy5waGluemUuY29tMSEwHwYJKoZIhvcNAQkBFhJwYXVsQGhh
+c2hpY29ycC5jb22CCQCez1ft5CWamTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEB
+BQUAA4IBAQC4tFfxpB8xEk9ewb5CNhhac4oKwGths+oq45DjoNtlagDMmIs2bl18
+q45PIB7fuFkAz/YHcOL0UEOAiw4jbuROp9jacHxBV21lRLLmNlK1Llc3eNVvLJ38
+ud6/Skilv9XyC4JNk0P5KrghxR6SOGwRuYZNqF+tthf+Bp9wJvLyfqDuJfGBal7C
+ezobMoh4tp8Dh1JeQlwvJcVt2k0UFJpa57MNr78c684Bq55ow+jd6wFG0XM0MMmy
+u+QRgJEGfYuYDPFEO8C8IfRyrHuV7Ll9P6eyEEFCneznXY0yJc/Gn3ZcX7ANqJsc
+ueMOWw/vUnonzxAFKW+I9U9ptyVSNMLY
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem
new file mode 120000
index 0000000..dda0574
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem
@@ -0,0 +1 @@
+../capath/securetrust.pem
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem
new file mode 120000
index 0000000..37ed4f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem
@@ -0,0 +1 @@
+../capath/thawte.pem
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/securetrust.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/securetrust.pem
new file mode 100644
index 0000000..3740092
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/securetrust.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
+MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
+cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
+Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
+wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
+7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
+8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
+D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
+CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
+3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/thawte.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/thawte.pem
new file mode 100644
index 0000000..998460f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/thawte.pem
@@ -0,0 +1,25 @@
+-----BEGIN CERTIFICATE-----
+MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB
+qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV
+BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw
+NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j
+LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG
+A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
+IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs
+W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta
+3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk
+6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6
+Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J
+NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP
+r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU
+DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz
+YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
+xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2
+/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/
+LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7
+jVaMaA==
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/hcl/decoder_test.go b/vendor/github.com/hashicorp/hcl/decoder_test.go
new file mode 100644
index 0000000..5a8404c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder_test.go
@@ -0,0 +1,1089 @@
+package hcl
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "testing"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/testhelper"
+)
+
+func TestDecode_interface(t *testing.T) {
+ cases := []struct {
+ File string
+ Err bool
+ Out interface{}
+ }{
+ {
+ "basic.hcl",
+ false,
+ map[string]interface{}{
+ "foo": "bar",
+ "bar": "${file(\"bing/bong.txt\")}",
+ },
+ },
+ {
+ "basic_squish.hcl",
+ false,
+ map[string]interface{}{
+ "foo": "bar",
+ "bar": "${file(\"bing/bong.txt\")}",
+ "foo-bar": "baz",
+ },
+ },
+ {
+ "empty.hcl",
+ false,
+ map[string]interface{}{
+ "resource": []map[string]interface{}{
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{},
+ },
+ },
+ },
+ },
+ },
+ {
+ "tfvars.hcl",
+ false,
+ map[string]interface{}{
+ "regularvar": "Should work",
+ "map.key1": "Value",
+ "map.key2": "Other value",
+ },
+ },
+ {
+ "escape.hcl",
+ false,
+ map[string]interface{}{
+ "foo": "bar\"baz\\n",
+ "qux": "back\\slash",
+ "bar": "new\nline",
+ "qax": `slash\:colon`,
+ "nested": `${HH\:mm\:ss}`,
+ "nestedquotes": `${"\"stringwrappedinquotes\""}`,
+ },
+ },
+ {
+ "float.hcl",
+ false,
+ map[string]interface{}{
+ "a": 1.02,
+ },
+ },
+ {
+ "multiline_bad.hcl",
+ true,
+ nil,
+ },
+ {
+ "multiline_literal.hcl",
+ false,
+ map[string]interface{}{"multiline_literal": testhelper.Unix2dos(`hello
+ world`)},
+ },
+ {
+ "multiline_no_marker.hcl",
+ true,
+ nil,
+ },
+ {
+ "multiline.hcl",
+ false,
+ map[string]interface{}{"foo": testhelper.Unix2dos("bar\nbaz\n")},
+ },
+ {
+ "multiline_indented.hcl",
+ false,
+ map[string]interface{}{"foo": testhelper.Unix2dos(" bar\n baz\n")},
+ },
+ {
+ "multiline_no_hanging_indent.hcl",
+ false,
+ map[string]interface{}{"foo": testhelper.Unix2dos(" baz\n bar\n foo\n")},
+ },
+ {
+ "multiline_no_eof.hcl",
+ false,
+ map[string]interface{}{"foo": testhelper.Unix2dos("bar\nbaz\n"), "key": "value"},
+ },
+ {
+ "multiline.json",
+ false,
+ map[string]interface{}{"foo": "bar\nbaz"},
+ },
+ {
+ "null_strings.json",
+ false,
+ map[string]interface{}{
+ "module": []map[string]interface{}{
+ map[string]interface{}{
+ "app": []map[string]interface{}{
+ map[string]interface{}{"foo": ""},
+ },
+ },
+ },
+ },
+ },
+ {
+ "scientific.json",
+ false,
+ map[string]interface{}{
+ "a": 1e-10,
+ "b": 1e+10,
+ "c": 1e10,
+ "d": 1.2e-10,
+ "e": 1.2e+10,
+ "f": 1.2e10,
+ },
+ },
+ {
+ "scientific.hcl",
+ false,
+ map[string]interface{}{
+ "a": 1e-10,
+ "b": 1e+10,
+ "c": 1e10,
+ "d": 1.2e-10,
+ "e": 1.2e+10,
+ "f": 1.2e10,
+ },
+ },
+ {
+ "terraform_heroku.hcl",
+ false,
+ map[string]interface{}{
+ "name": "terraform-test-app",
+ "config_vars": []map[string]interface{}{
+ map[string]interface{}{
+ "FOO": "bar",
+ },
+ },
+ },
+ },
+ {
+ "structure_multi.hcl",
+ false,
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "baz": []map[string]interface{}{
+ map[string]interface{}{"key": 7},
+ },
+ },
+ map[string]interface{}{
+ "bar": []map[string]interface{}{
+ map[string]interface{}{"key": 12},
+ },
+ },
+ },
+ },
+ },
+ {
+ "structure_multi.json",
+ false,
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "baz": []map[string]interface{}{
+ map[string]interface{}{"key": 7},
+ },
+ },
+ map[string]interface{}{
+ "bar": []map[string]interface{}{
+ map[string]interface{}{"key": 12},
+ },
+ },
+ },
+ },
+ },
+ {
+ "list_of_maps.hcl",
+ false,
+ map[string]interface{}{
+ "foo": []interface{}{
+ map[string]interface{}{"somekey1": "someval1"},
+ map[string]interface{}{"somekey2": "someval2", "someextrakey": "someextraval"},
+ },
+ },
+ },
+ {
+ "assign_deep.hcl",
+ false,
+ map[string]interface{}{
+ "resource": []interface{}{
+ map[string]interface{}{
+ "foo": []interface{}{
+ map[string]interface{}{
+ "bar": []map[string]interface{}{
+ map[string]interface{}{}}}}}}},
+ },
+ {
+ "structure_list.hcl",
+ false,
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "key": 7,
+ },
+ map[string]interface{}{
+ "key": 12,
+ },
+ },
+ },
+ },
+ {
+ "structure_list.json",
+ false,
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "key": 7,
+ },
+ map[string]interface{}{
+ "key": 12,
+ },
+ },
+ },
+ },
+ {
+ "structure_list_deep.json",
+ false,
+ map[string]interface{}{
+ "bar": []map[string]interface{}{
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "name": "terraform_example",
+ "ingress": []map[string]interface{}{
+ map[string]interface{}{
+ "from_port": 22,
+ },
+ map[string]interface{}{
+ "from_port": 80,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+
+ {
+ "nested_block_comment.hcl",
+ false,
+ map[string]interface{}{
+ "bar": "value",
+ },
+ },
+
+ {
+ "unterminated_block_comment.hcl",
+ true,
+ nil,
+ },
+
+ {
+ "unterminated_brace.hcl",
+ true,
+ nil,
+ },
+
+ {
+ "nested_provider_bad.hcl",
+ true,
+ nil,
+ },
+
+ {
+ "object_list.json",
+ false,
+ map[string]interface{}{
+ "resource": []map[string]interface{}{
+ map[string]interface{}{
+ "aws_instance": []map[string]interface{}{
+ map[string]interface{}{
+ "db": []map[string]interface{}{
+ map[string]interface{}{
+ "vpc": "foo",
+ "provisioner": []map[string]interface{}{
+ map[string]interface{}{
+ "file": []map[string]interface{}{
+ map[string]interface{}{
+ "source": "foo",
+ "destination": "bar",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+
+ // Terraform GH-8295 sanity test that basic decoding into
+ // interface{} works.
+ {
+ "terraform_variable_invalid.json",
+ false,
+ map[string]interface{}{
+ "variable": []map[string]interface{}{
+ map[string]interface{}{
+ "whatever": "abc123",
+ },
+ },
+ },
+ },
+
+ {
+ "interpolate.json",
+ false,
+ map[string]interface{}{
+ "default": `${replace("europe-west", "-", " ")}`,
+ },
+ },
+
+ {
+ "block_assign.hcl",
+ true,
+ nil,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Logf("Testing: %s", tc.File)
+ d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var out interface{}
+ err = Decode(&out, string(d))
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
+ }
+
+ if !reflect.DeepEqual(out, tc.Out) {
+ t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
+ }
+
+ var v interface{}
+ err = Unmarshal(d, &v)
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
+ }
+
+ if !reflect.DeepEqual(v, tc.Out) {
+ t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
+ }
+ }
+}
+
+func TestDecode_interfaceInline(t *testing.T) {
+ cases := []struct {
+ Value string
+ Err bool
+ Out interface{}
+ }{
+ {"t t e{{}}", true, nil},
+ {"t=0t d {}", true, map[string]interface{}{"t": 0}},
+ {"v=0E0v d{}", true, map[string]interface{}{"v": float64(0)}},
+ }
+
+ for _, tc := range cases {
+ t.Logf("Testing: %q", tc.Value)
+
+ var out interface{}
+ err := Decode(&out, tc.Value)
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %q\n\nError: %s", tc.Value, err)
+ }
+
+ if !reflect.DeepEqual(out, tc.Out) {
+ t.Fatalf("Input: %q. Actual, Expected.\n\n%#v\n\n%#v", tc.Value, out, tc.Out)
+ }
+
+ var v interface{}
+ err = Unmarshal([]byte(tc.Value), &v)
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %q\n\nError: %s", tc.Value, err)
+ }
+
+ if !reflect.DeepEqual(v, tc.Out) {
+ t.Fatalf("Input: %q. Actual, Expected.\n\n%#v\n\n%#v", tc.Value, out, tc.Out)
+ }
+ }
+}
+
+func TestDecode_equal(t *testing.T) {
+ cases := []struct {
+ One, Two string
+ }{
+ {
+ "basic.hcl",
+ "basic.json",
+ },
+ {
+ "float.hcl",
+ "float.json",
+ },
+ /*
+ {
+ "structure.hcl",
+ "structure.json",
+ },
+ */
+ {
+ "structure.hcl",
+ "structure_flat.json",
+ },
+ {
+ "terraform_heroku.hcl",
+ "terraform_heroku.json",
+ },
+ }
+
+ for _, tc := range cases {
+ p1 := filepath.Join(fixtureDir, tc.One)
+ p2 := filepath.Join(fixtureDir, tc.Two)
+
+ d1, err := ioutil.ReadFile(p1)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ d2, err := ioutil.ReadFile(p2)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var i1, i2 interface{}
+ err = Decode(&i1, string(d1))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = Decode(&i2, string(d2))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(i1, i2) {
+ t.Fatalf(
+ "%s != %s\n\n%#v\n\n%#v",
+ tc.One, tc.Two,
+ i1, i2)
+ }
+ }
+}
+
+func TestDecode_flatMap(t *testing.T) {
+ var val map[string]map[string]string
+
+ err := Decode(&val, testReadFile(t, "structure_flatmap.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := map[string]map[string]string{
+ "foo": map[string]string{
+ "foo": "bar",
+ "key": "7",
+ },
+ }
+
+ if !reflect.DeepEqual(val, expected) {
+ t.Fatalf("Actual: %#v\n\nExpected: %#v", val, expected)
+ }
+}
+
+func TestDecode_structure(t *testing.T) {
+ type Embedded interface{}
+
+ type V struct {
+ Embedded `hcl:"-"`
+ Key int
+ Foo string
+ }
+
+ var actual V
+
+ err := Decode(&actual, testReadFile(t, "flat.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := V{
+ Key: 7,
+ Foo: "bar",
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
+ }
+}
+
+func TestDecode_structurePtr(t *testing.T) {
+ type V struct {
+ Key int
+ Foo string
+ }
+
+ var actual *V
+
+ err := Decode(&actual, testReadFile(t, "flat.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := &V{
+ Key: 7,
+ Foo: "bar",
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
+ }
+}
+
+func TestDecode_structureArray(t *testing.T) {
+ // This test is extracted from a failure in Consul (consul.io),
+ // hence the interesting structure naming.
+
+ type KeyPolicyType string
+
+ type KeyPolicy struct {
+ Prefix string `hcl:",key"`
+ Policy KeyPolicyType
+ }
+
+ type Policy struct {
+ Keys []KeyPolicy `hcl:"key,expand"`
+ }
+
+ expected := Policy{
+ Keys: []KeyPolicy{
+ KeyPolicy{
+ Prefix: "",
+ Policy: "read",
+ },
+ KeyPolicy{
+ Prefix: "foo/",
+ Policy: "write",
+ },
+ KeyPolicy{
+ Prefix: "foo/bar/",
+ Policy: "read",
+ },
+ KeyPolicy{
+ Prefix: "foo/bar/baz",
+ Policy: "deny",
+ },
+ },
+ }
+
+ files := []string{
+ "decode_policy.hcl",
+ "decode_policy.json",
+ }
+
+ for _, f := range files {
+ var actual Policy
+
+ err := Decode(&actual, testReadFile(t, f))
+ if err != nil {
+ t.Fatalf("Input: %s\n\nerr: %s", f, err)
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
+ }
+ }
+}
+
+func TestDecode_sliceExpand(t *testing.T) {
+ type testInner struct {
+ Name string `hcl:",key"`
+ Key string
+ }
+
+ type testStruct struct {
+ Services []testInner `hcl:"service,expand"`
+ }
+
+ expected := testStruct{
+ Services: []testInner{
+ testInner{
+ Name: "my-service-0",
+ Key: "value",
+ },
+ testInner{
+ Name: "my-service-1",
+ Key: "value",
+ },
+ },
+ }
+
+ files := []string{
+ "slice_expand.hcl",
+ }
+
+ for _, f := range files {
+ t.Logf("Testing: %s", f)
+
+ var actual testStruct
+ err := Decode(&actual, testReadFile(t, f))
+ if err != nil {
+ t.Fatalf("Input: %s\n\nerr: %s", f, err)
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
+ }
+ }
+}
+
+func TestDecode_structureMap(t *testing.T) {
+ // This test is extracted from a failure in Terraform (terraform.io),
+ // hence the interesting structure naming.
+
+ type hclVariable struct {
+ Default interface{}
+ Description string
+ Fields []string `hcl:",decodedFields"`
+ }
+
+ type rawConfig struct {
+ Variable map[string]hclVariable
+ }
+
+ expected := rawConfig{
+ Variable: map[string]hclVariable{
+ "foo": hclVariable{
+ Default: "bar",
+ Description: "bar",
+ Fields: []string{"Default", "Description"},
+ },
+
+ "amis": hclVariable{
+ Default: []map[string]interface{}{
+ map[string]interface{}{
+ "east": "foo",
+ },
+ },
+ Fields: []string{"Default"},
+ },
+ },
+ }
+
+ files := []string{
+ "decode_tf_variable.hcl",
+ "decode_tf_variable.json",
+ }
+
+ for _, f := range files {
+ t.Logf("Testing: %s", f)
+
+ var actual rawConfig
+ err := Decode(&actual, testReadFile(t, f))
+ if err != nil {
+ t.Fatalf("Input: %s\n\nerr: %s", f, err)
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
+ }
+ }
+}
+
+func TestDecode_structureMapInvalid(t *testing.T) {
+ // Terraform GH-8295
+
+ type hclVariable struct {
+ Default interface{}
+ Description string
+ Fields []string `hcl:",decodedFields"`
+ }
+
+ type rawConfig struct {
+ Variable map[string]*hclVariable
+ }
+
+ var actual rawConfig
+ err := Decode(&actual, testReadFile(t, "terraform_variable_invalid.json"))
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+func TestDecode_interfaceNonPointer(t *testing.T) {
+ var value interface{}
+ err := Decode(value, testReadFile(t, "basic_int_string.hcl"))
+ if err == nil {
+ t.Fatal("should error")
+ }
+}
+
+func TestDecode_intString(t *testing.T) {
+ var value struct {
+ Count int
+ }
+
+ err := Decode(&value, testReadFile(t, "basic_int_string.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if value.Count != 3 {
+ t.Fatalf("bad: %#v", value.Count)
+ }
+}
+
+func TestDecode_Node(t *testing.T) {
+ // given
+ var value struct {
+ Content ast.Node
+ Nested struct {
+ Content ast.Node
+ }
+ }
+
+ content := `
+content {
+ hello = "world"
+}
+`
+
+ // when
+ err := Decode(&value, content)
+
+ // then
+ if err != nil {
+ t.Errorf("unable to decode content, %v", err)
+ return
+ }
+
+ // verify ast.Node can be decoded later
+ var v map[string]interface{}
+ err = DecodeObject(&v, value.Content)
+ if err != nil {
+ t.Errorf("unable to decode content, %v", err)
+ return
+ }
+
+ if v["hello"] != "world" {
+ t.Errorf("expected mapping to be returned")
+ }
+}
+
+func TestDecode_NestedNode(t *testing.T) {
+ // given
+ var value struct {
+ Nested struct {
+ Content ast.Node
+ }
+ }
+
+ content := `
+nested "content" {
+ hello = "world"
+}
+`
+
+ // when
+ err := Decode(&value, content)
+
+ // then
+ if err != nil {
+ t.Errorf("unable to decode content, %v", err)
+ return
+ }
+
+ // verify ast.Node can be decoded later
+ var v map[string]interface{}
+ err = DecodeObject(&v, value.Nested.Content)
+ if err != nil {
+ t.Errorf("unable to decode content, %v", err)
+ return
+ }
+
+ if v["hello"] != "world" {
+ t.Errorf("expected mapping to be returned")
+ }
+}
+
+// https://github.com/hashicorp/hcl/issues/60
+func TestDecode_topLevelKeys(t *testing.T) {
+ type Template struct {
+ Source string
+ }
+
+ templates := struct {
+ Templates []*Template `hcl:"template"`
+ }{}
+
+ err := Decode(&templates, `
+ template {
+ source = "blah"
+ }
+
+ template {
+ source = "blahblah"
+ }`)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if templates.Templates[0].Source != "blah" {
+ t.Errorf("bad source: %s", templates.Templates[0].Source)
+ }
+
+ if templates.Templates[1].Source != "blahblah" {
+ t.Errorf("bad source: %s", templates.Templates[1].Source)
+ }
+}
+
+func TestDecode_flattenedJSON(t *testing.T) {
+ // make sure we can also correctly extract a Name key too
+ type V struct {
+ Name string `hcl:",key"`
+ Description string
+ Default map[string]string
+ }
+ type Vars struct {
+ Variable []*V
+ }
+
+ cases := []struct {
+ JSON string
+ Out interface{}
+ Expected interface{}
+ }{
+ { // Nested object, no sibling keys
+ JSON: `
+{
+ "var_name": {
+ "default": {
+ "key1": "a",
+ "key2": "b"
+ }
+ }
+}
+ `,
+ Out: &[]*V{},
+ Expected: &[]*V{
+ &V{
+ Name: "var_name",
+ Default: map[string]string{"key1": "a", "key2": "b"},
+ },
+ },
+ },
+
+ { // Nested object with a sibling key (this worked previously)
+ JSON: `
+{
+ "var_name": {
+ "description": "Described",
+ "default": {
+ "key1": "a",
+ "key2": "b"
+ }
+ }
+}
+ `,
+ Out: &[]*V{},
+ Expected: &[]*V{
+ &V{
+ Name: "var_name",
+ Description: "Described",
+ Default: map[string]string{"key1": "a", "key2": "b"},
+ },
+ },
+ },
+
+ { // Multiple nested objects, one with a sibling key
+ JSON: `
+{
+ "variable": {
+ "var_1": {
+ "default": {
+ "key1": "a",
+ "key2": "b"
+ }
+ },
+ "var_2": {
+ "description": "Described",
+ "default": {
+ "key1": "a",
+ "key2": "b"
+ }
+ }
+ }
+}
+ `,
+ Out: &Vars{},
+ Expected: &Vars{
+ Variable: []*V{
+ &V{
+ Name: "var_1",
+ Default: map[string]string{"key1": "a", "key2": "b"},
+ },
+ &V{
+ Name: "var_2",
+ Description: "Described",
+ Default: map[string]string{"key1": "a", "key2": "b"},
+ },
+ },
+ },
+ },
+
+ { // Nested object to maps
+ JSON: `
+{
+ "variable": {
+ "var_name": {
+ "description": "Described",
+ "default": {
+ "key1": "a",
+ "key2": "b"
+ }
+ }
+ }
+}
+ `,
+ Out: &[]map[string]interface{}{},
+ Expected: &[]map[string]interface{}{
+ {
+ "variable": []map[string]interface{}{
+ {
+ "var_name": []map[string]interface{}{
+ {
+ "description": "Described",
+ "default": []map[string]interface{}{
+ {
+ "key1": "a",
+ "key2": "b",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+
+ { // Nested object to maps without a sibling key should decode the same as above
+ JSON: `
+{
+ "variable": {
+ "var_name": {
+ "default": {
+ "key1": "a",
+ "key2": "b"
+ }
+ }
+ }
+}
+ `,
+ Out: &[]map[string]interface{}{},
+ Expected: &[]map[string]interface{}{
+ {
+ "variable": []map[string]interface{}{
+ {
+ "var_name": []map[string]interface{}{
+ {
+ "default": []map[string]interface{}{
+ {
+ "key1": "a",
+ "key2": "b",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+
+ { // Nested objects, one with a sibling key, and one without
+ JSON: `
+{
+ "variable": {
+ "var_1": {
+ "default": {
+ "key1": "a",
+ "key2": "b"
+ }
+ },
+ "var_2": {
+ "description": "Described",
+ "default": {
+ "key1": "a",
+ "key2": "b"
+ }
+ }
+ }
+}
+ `,
+ Out: &[]map[string]interface{}{},
+ Expected: &[]map[string]interface{}{
+ {
+ "variable": []map[string]interface{}{
+ {
+ "var_1": []map[string]interface{}{
+ {
+ "default": []map[string]interface{}{
+ {
+ "key1": "a",
+ "key2": "b",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "variable": []map[string]interface{}{
+ {
+ "var_2": []map[string]interface{}{
+ {
+ "description": "Described",
+ "default": []map[string]interface{}{
+ {
+ "key1": "a",
+ "key2": "b",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for i, tc := range cases {
+ err := Decode(tc.Out, tc.JSON)
+ if err != nil {
+ t.Fatalf("[%d] err: %s", i, err)
+ }
+
+ if !reflect.DeepEqual(tc.Out, tc.Expected) {
+ t.Fatalf("[%d]\ngot: %s\nexpected: %s\n", i, spew.Sdump(tc.Out), spew.Sdump(tc.Expected))
+ }
+ }
+}
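
For reviewers skimming this vendored test file, here is a minimal sketch of the Decode entry point it exercises. The inlined HCL input is an assumption modeled on the flat.hcl fixture implied by the expected values in TestDecode_structure above.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	var config struct {
		Key int
		Foo string
	}

	// Assumed fixture contents, inferred from the expected V{Key: 7, Foo: "bar"}.
	input := "key = 7\nfoo = \"bar\"\n"

	// Decode parses the HCL string and fills the target struct in place.
	if err := hcl.Decode(&config, input); err != nil {
		log.Fatalf("decode: %s", err)
	}

	fmt.Printf("%+v\n", config) // prints {Key:7 Foo:bar}
}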
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go
new file mode 100644
index 0000000..942256c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go
@@ -0,0 +1,200 @@
+package ast
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+func TestObjectListFilter(t *testing.T) {
+ var cases = []struct {
+ Filter []string
+ Input []*ObjectItem
+ Output []*ObjectItem
+ }{
+ {
+ []string{"foo"},
+ []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{
+ Token: token.Token{Type: token.STRING, Text: `"foo"`},
+ },
+ },
+ },
+ },
+ []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{},
+ },
+ },
+ },
+
+ {
+ []string{"foo"},
+ []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+ },
+ },
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+ },
+ },
+ },
+ []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ input := &ObjectList{Items: tc.Input}
+ expected := &ObjectList{Items: tc.Output}
+ if actual := input.Filter(tc.Filter...); !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("in order: input, expected, actual\n\n%#v\n\n%#v\n\n%#v", input, expected, actual)
+ }
+ }
+}
+
+func TestWalk(t *testing.T) {
+ items := []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+ },
+ Val: &LiteralType{Token: token.Token{Type: token.STRING, Text: `"example"`}},
+ },
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+ },
+ },
+ }
+
+ node := &ObjectList{Items: items}
+
+ order := []string{
+ "*ast.ObjectList",
+ "*ast.ObjectItem",
+ "*ast.ObjectKey",
+ "*ast.ObjectKey",
+ "*ast.LiteralType",
+ "*ast.ObjectItem",
+ "*ast.ObjectKey",
+ }
+ count := 0
+
+ Walk(node, func(n Node) (Node, bool) {
+ if n == nil {
+ return n, false
+ }
+
+ typeName := reflect.TypeOf(n).String()
+ if order[count] != typeName {
+ t.Errorf("expected '%s' got: '%s'", order[count], typeName)
+ }
+ count++
+ return n, true
+ })
+}
+
+func TestWalkEquality(t *testing.T) {
+ items := []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+ },
+ },
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+ },
+ },
+ }
+
+ node := &ObjectList{Items: items}
+
+ rewritten := Walk(node, func(n Node) (Node, bool) { return n, true })
+
+ newNode, ok := rewritten.(*ObjectList)
+ if !ok {
+ t.Fatalf("expected Objectlist, got %T", rewritten)
+ }
+
+ if !reflect.DeepEqual(node, newNode) {
+ t.Fatal("rewritten node is not equal to the given node")
+ }
+
+ if len(newNode.Items) != 2 {
+ t.Error("expected newNode length 2, got: %d", len(newNode.Items))
+ }
+
+ expected := []string{
+ `"foo"`,
+ `"bar"`,
+ }
+
+ for i, item := range newNode.Items {
+ if len(item.Keys) != 1 {
+ t.Error("expected keys newNode length 1, got: %d", len(item.Keys))
+ }
+
+ if item.Keys[0].Token.Text != expected[i] {
+ t.Errorf("expected key %s, got %s", expected[i], item.Keys[0].Token.Text)
+ }
+
+ if item.Val != nil {
+ t.Errorf("expected item value should be nil")
+ }
+ }
+}
+
+func TestWalkRewrite(t *testing.T) {
+ items := []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+ },
+ },
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+ },
+ },
+ }
+
+ node := &ObjectList{Items: items}
+
+ suffix := "_example"
+ node = Walk(node, func(n Node) (Node, bool) {
+ switch i := n.(type) {
+ case *ObjectKey:
+ i.Token.Text = i.Token.Text + suffix
+ n = i
+ }
+ return n, true
+ }).(*ObjectList)
+
+ Walk(node, func(n Node) (Node, bool) {
+ switch i := n.(type) {
+ case *ObjectKey:
+ if !strings.HasSuffix(i.Token.Text, suffix) {
+ t.Errorf("Token '%s' should have suffix: %s", i.Token.Text, suffix)
+ }
+ }
+ return n, true
+ })
+
+}
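
As a standalone companion to TestWalkRewrite above, this sketch shows the Walk rewrite pattern outside the test harness; the key text and suffix are illustrative only.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	node := &ast.ObjectList{
		Items: []*ast.ObjectItem{
			{Keys: []*ast.ObjectKey{
				{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
			}},
		},
	}

	// Returning (n, true) keeps descending into children; Walk returns
	// the (possibly rewritten) tree.
	rewritten := ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
		if key, ok := n.(*ast.ObjectKey); ok {
			key.Token.Text += "_example"
		}
		return n, true
	})

	list := rewritten.(*ast.ObjectList)
	fmt.Println(list.Items[0].Keys[0].Token.Text) // prints "foo"_example
}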
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
new file mode 100644
index 0000000..85e536d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
@@ -0,0 +1,162 @@
+// Derivative work from:
+// - https://golang.org/src/cmd/gofmt/gofmt.go
+// - https://github.com/fatih/hclfmt
+
+package fmtcmd
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/printer"
+)
+
+var (
+ ErrWriteStdin = errors.New("cannot use write option with standard input")
+)
+
+type Options struct {
+ List bool // list files whose formatting differs
+ Write bool // write result to (source) file instead of stdout
+ Diff bool // display diffs of formatting changes
+}
+
+func isValidFile(f os.FileInfo, extensions []string) bool {
+ if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") {
+ for _, ext := range extensions {
+ if strings.HasSuffix(f.Name(), "."+ext) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// If in == nil, the source is the contents of the file with the given filename.
+func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error {
+ if in == nil {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ in = f
+ }
+
+ src, err := ioutil.ReadAll(in)
+ if err != nil {
+ return err
+ }
+
+ res, err := printer.Format(src)
+ if err != nil {
+ return err
+ }
+
+ if !bytes.Equal(src, res) {
+ // formatting has changed
+ if opts.List {
+ fmt.Fprintln(out, filename)
+ }
+ if opts.Write {
+ err = ioutil.WriteFile(filename, res, 0644)
+ if err != nil {
+ return err
+ }
+ }
+ if opts.Diff {
+ data, err := diff(src, res)
+ if err != nil {
+ return fmt.Errorf("computing diff: %s", err)
+ }
+ fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename)
+ out.Write(data)
+ }
+ }
+
+ if !opts.List && !opts.Write && !opts.Diff {
+ _, err = out.Write(res)
+ }
+
+ return err
+}
+
+func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error {
+ visitFile := func(path string, f os.FileInfo, err error) error {
+ if err == nil && isValidFile(f, extensions) {
+ err = processFile(path, nil, stdout, false, opts)
+ }
+ return err
+ }
+
+ return filepath.Walk(path, visitFile)
+}
+
+func Run(
+ paths, extensions []string,
+ stdin io.Reader,
+ stdout io.Writer,
+ opts Options,
+) error {
+ if len(paths) == 0 {
+ if opts.Write {
+ return ErrWriteStdin
+ }
+ if err := processFile("", stdin, stdout, true, opts); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ for _, path := range paths {
+ switch dir, err := os.Stat(path); {
+ case err != nil:
+ return err
+ case dir.IsDir():
+ if err := walkDir(path, extensions, stdout, opts); err != nil {
+ return err
+ }
+ default:
+ if err := processFile(path, nil, stdout, false, opts); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func diff(b1, b2 []byte) (data []byte, err error) {
+ f1, err := ioutil.TempFile("", "")
+ if err != nil {
+ return
+ }
+ defer os.Remove(f1.Name())
+ defer f1.Close()
+
+ f2, err := ioutil.TempFile("", "")
+ if err != nil {
+ return
+ }
+ defer os.Remove(f2.Name())
+ defer f2.Close()
+
+ f1.Write(b1)
+ f2.Write(b2)
+
+ data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
+ if len(data) > 0 {
+ // diff exits with a non-zero status when the files don't match.
+ // Ignore that failure as long as we get output.
+ err = nil
+ }
+ return
+}
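
To make the Run contract above concrete, a sketch of the stdin code path (empty path list, so input comes from the reader and the formatted result goes to stdout); the input string is illustrative.

package main

import (
	"bytes"
	"log"
	"os"

	"github.com/hashicorp/hcl/hcl/fmtcmd"
)

func main() {
	stdin := bytes.NewBufferString("foo   =   \"bar\"\n")

	// With no paths, Run formats stdin; combining this with
	// Options{Write: true} would return ErrWriteStdin instead.
	err := fmtcmd.Run(nil, []string{"hcl"}, stdin, os.Stdout, fmtcmd.Options{})
	if err != nil {
		log.Fatalf("fmt: %s", err)
	}
}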
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go
new file mode 100644
index 0000000..b952d76
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go
@@ -0,0 +1,440 @@
+// +build !windows
+// TODO(jen20): These need fixing on Windows but fmt is not used right now
+// and red CI is making it harder to process other bugs, so ignore until
+// we get around to fixing them.
+
+package fmtcmd
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "sort"
+ "syscall"
+ "testing"
+
+ "github.com/hashicorp/hcl/testhelper"
+)
+
+var fixtureExtensions = []string{"hcl"}
+
+func init() {
+ sort.Sort(ByFilename(fixtures))
+}
+
+func TestIsValidFile(t *testing.T) {
+ const fixtureDir = "./test-fixtures"
+
+ cases := []struct {
+ Path string
+ Expected bool
+ }{
+ {"good.hcl", true},
+ {".hidden.ignore", false},
+ {"file.ignore", false},
+ {"dir.ignore", false},
+ }
+
+ for _, tc := range cases {
+ file, err := os.Stat(filepath.Join(fixtureDir, tc.Path))
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+
+ if res := isValidFile(file, fixtureExtensions); res != tc.Expected {
+ t.Errorf("want: %b, got: %b", tc.Expected, res)
+ }
+ }
+}
+
+func TestRunMultiplePaths(t *testing.T) {
+ path1, err := renderFixtures("")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path1)
+ path2, err := renderFixtures("")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path2)
+
+ var expectedOut bytes.Buffer
+ for _, path := range []string{path1, path2} {
+ for _, fixture := range fixtures {
+ if !bytes.Equal(fixture.golden, fixture.input) {
+ expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n")
+ }
+ }
+ }
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{path1, path2},
+ fixtureExtensions,
+ nil, stdout,
+ Options{
+ List: true,
+ },
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if stdout.String() != expectedOut.String() {
+ t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+ }
+}
+
+func TestRunSubDirectories(t *testing.T) {
+ pathParent, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(pathParent)
+
+ path1, err := renderFixtures(pathParent)
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ path2, err := renderFixtures(pathParent)
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+
+ paths := []string{path1, path2}
+ sort.Strings(paths)
+
+ var expectedOut bytes.Buffer
+ for _, path := range paths {
+ for _, fixture := range fixtures {
+ if !bytes.Equal(fixture.golden, fixture.input) {
+ expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n")
+ }
+ }
+ }
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{pathParent},
+ fixtureExtensions,
+ nil, stdout,
+ Options{
+ List: true,
+ },
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if stdout.String() != expectedOut.String() {
+ t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+ }
+}
+
+func TestRunStdin(t *testing.T) {
+ var expectedOut bytes.Buffer
+ for i, fixture := range fixtures {
+ if i != 0 {
+ expectedOut.WriteString("\n")
+ }
+ expectedOut.Write(fixture.golden)
+ }
+
+ stdin, stdout := mockIO()
+ for _, fixture := range fixtures {
+ stdin.Write(fixture.input)
+ }
+
+ err := Run(
+ []string{},
+ fixtureExtensions,
+ stdin, stdout,
+ Options{},
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if !bytes.Equal(stdout.Bytes(), expectedOut.Bytes()) {
+ t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+ }
+}
+
+func TestRunStdinAndWrite(t *testing.T) {
+ var expectedOut = []byte{}
+
+ stdin, stdout := mockIO()
+ stdin.WriteString("")
+ err := Run(
+ []string{}, []string{},
+ stdin, stdout,
+ Options{
+ Write: true,
+ },
+ )
+
+ if err != ErrWriteStdin {
+ t.Errorf("error want:\n%s\ngot:\n%s", ErrWriteStdin, err)
+ }
+ if !bytes.Equal(stdout.Bytes(), expectedOut) {
+ t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+ }
+}
+
+func TestRunFileError(t *testing.T) {
+ path, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path)
+ filename := filepath.Join(path, "unreadable.hcl")
+
+ var expectedError = &os.PathError{
+ Op: "open",
+ Path: filename,
+ Err: syscall.EACCES,
+ }
+
+ err = ioutil.WriteFile(filename, []byte{}, 0000)
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{path},
+ fixtureExtensions,
+ nil, stdout,
+ Options{},
+ )
+
+ if !reflect.DeepEqual(err, expectedError) {
+ t.Errorf("error want: %#v, got: %#v", expectedError, err)
+ }
+}
+
+func TestRunNoOptions(t *testing.T) {
+ path, err := renderFixtures("")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path)
+
+ var expectedOut bytes.Buffer
+ for _, fixture := range fixtures {
+ expectedOut.Write(fixture.golden)
+ }
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{path},
+ fixtureExtensions,
+ nil, stdout,
+ Options{},
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if stdout.String() != expectedOut.String() {
+ t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+ }
+}
+
+func TestRunList(t *testing.T) {
+ path, err := renderFixtures("")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path)
+
+ var expectedOut bytes.Buffer
+ for _, fixture := range fixtures {
+ if !bytes.Equal(fixture.golden, fixture.input) {
+ expectedOut.WriteString(fmt.Sprintln(filepath.Join(path, fixture.filename)))
+ }
+ }
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{path},
+ fixtureExtensions,
+ nil, stdout,
+ Options{
+ List: true,
+ },
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if stdout.String() != expectedOut.String() {
+ t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+ }
+}
+
+func TestRunWrite(t *testing.T) {
+ path, err := renderFixtures("")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path)
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{path},
+ fixtureExtensions,
+ nil, stdout,
+ Options{
+ Write: true,
+ },
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ for _, fixture := range fixtures {
+ res, err := ioutil.ReadFile(filepath.Join(path, fixture.filename))
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if !bytes.Equal(res, fixture.golden) {
+ t.Errorf("file %q contents want:\n%s\ngot:\n%s", fixture.filename, fixture.golden, res)
+ }
+ }
+}
+
+func TestRunDiff(t *testing.T) {
+ path, err := renderFixtures("")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path)
+
+ var expectedOut bytes.Buffer
+ for _, fixture := range fixtures {
+ if len(fixture.diff) > 0 {
+ expectedOut.WriteString(
+ regexp.QuoteMeta(
+ fmt.Sprintf("diff a/%s/%s b/%s/%s\n", path, fixture.filename, path, fixture.filename),
+ ),
+ )
+ // Need to use regex to ignore datetimes in diff.
+ expectedOut.WriteString(`--- .+?\n`)
+ expectedOut.WriteString(`\+\+\+ .+?\n`)
+ expectedOut.WriteString(regexp.QuoteMeta(string(fixture.diff)))
+ }
+ }
+
+ expectedOutString := testhelper.Unix2dos(expectedOut.String())
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{path},
+ fixtureExtensions,
+ nil, stdout,
+ Options{
+ Diff: true,
+ },
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if !regexp.MustCompile(expectedOutString).Match(stdout.Bytes()) {
+ t.Errorf("stdout want match:\n%s\ngot:\n%q", expectedOutString, stdout)
+ }
+}
+
+func mockIO() (stdin, stdout *bytes.Buffer) {
+ return new(bytes.Buffer), new(bytes.Buffer)
+}
+
+type fixture struct {
+ filename string
+ input, golden, diff []byte
+}
+
+type ByFilename []fixture
+
+func (s ByFilename) Len() int { return len(s) }
+func (s ByFilename) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s ByFilename) Less(i, j int) bool { return len(s[i].filename) > len(s[j].filename) }
+
+var fixtures = []fixture{
+ {
+ "noop.hcl",
+ []byte(`resource "aws_security_group" "firewall" {
+ count = 5
+}
+`),
+ []byte(`resource "aws_security_group" "firewall" {
+ count = 5
+}
+`),
+ []byte(``),
+ }, {
+ "align_equals.hcl",
+ []byte(`variable "foo" {
+ default = "bar"
+ description = "bar"
+}
+`),
+ []byte(`variable "foo" {
+ default = "bar"
+ description = "bar"
+}
+`),
+ []byte(`@@ -1,4 +1,4 @@
+ variable "foo" {
+- default = "bar"
++ default = "bar"
+ description = "bar"
+ }
+`),
+ }, {
+ "indentation.hcl",
+ []byte(`provider "aws" {
+ access_key = "foo"
+ secret_key = "bar"
+}
+`),
+ []byte(`provider "aws" {
+ access_key = "foo"
+ secret_key = "bar"
+}
+`),
+ []byte(`@@ -1,4 +1,4 @@
+ provider "aws" {
+- access_key = "foo"
+- secret_key = "bar"
++ access_key = "foo"
++ secret_key = "bar"
+ }
+`),
+ },
+}
+
+// parent can be an empty string, in which case the system's default
+// temporary directory will be used.
+func renderFixtures(parent string) (path string, err error) {
+ path, err = ioutil.TempDir(parent, "")
+ if err != nil {
+ return "", err
+ }
+
+ for _, fixture := range fixtures {
+ err = ioutil.WriteFile(filepath.Join(path, fixture.filename), []byte(fixture.input), 0644)
+ if err != nil {
+ os.RemoveAll(path)
+ return "", err
+ }
+ }
+
+ return path, nil
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore
new file mode 100644
index 0000000..9977a28
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore
@@ -0,0 +1 @@
+invalid
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore
new file mode 100644
index 0000000..e69de29
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore
new file mode 100644
index 0000000..9977a28
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore
@@ -0,0 +1 @@
+invalid
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/good.hcl b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/good.hcl
new file mode 100644
index 0000000..e69de29
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go
new file mode 100644
index 0000000..32399fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go
@@ -0,0 +1,9 @@
+package parser
+
+import (
+ "testing"
+)
+
+func TestPosError_impl(t *testing.T) {
+ var _ error = new(PosError)
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go
new file mode 100644
index 0000000..2756d06
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go
@@ -0,0 +1,420 @@
+package parser
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+func TestType(t *testing.T) {
+ var literals = []struct {
+ typ token.Type
+ src string
+ }{
+ {token.STRING, `foo = "foo"`},
+ {token.NUMBER, `foo = 123`},
+ {token.NUMBER, `foo = -29`},
+ {token.FLOAT, `foo = 123.12`},
+ {token.FLOAT, `foo = -123.12`},
+ {token.BOOL, `foo = true`},
+ {token.HEREDOC, "foo = < 0 {
+ commented = true
+ buf.WriteByte(newline)
+ }
+
+ buf.Write(p.indent([]byte(comment.Text)))
+ buf.WriteByte(newline)
+ if index != len(o.List.Items) {
+ buf.WriteByte(newline) // do not print on the end
+ }
+ }
+ }
+ }
+
+ if index == len(o.List.Items) {
+ p.prev = o.Rbrace
+ break
+ }
+
+ // At this point we are sure that it's not a totally empty block: print
+ // the initial newline if it hasn't been printed yet by the previous
+ // block about standalone comments.
+ if !newlinePrinted {
+ buf.WriteByte(newline)
+ newlinePrinted = true
+ }
+
+ // check if we have adjacent one-liner items. If so, we align
+ // their comments.
+ var aligned []*ast.ObjectItem
+ for _, item := range o.List.Items[index:] {
+ // we don't group one line lists
+ if len(o.List.Items) == 1 {
+ break
+ }
+
+ // one means a one-liner without any lead comment
+ // two means a one-liner with a lead comment
+ // anything else might be something else
+ cur := lines(string(p.objectItem(item)))
+ if cur > 2 {
+ break
+ }
+
+ curPos := item.Pos()
+
+ nextPos := token.Pos{}
+ if index != len(o.List.Items)-1 {
+ nextPos = o.List.Items[index+1].Pos()
+ }
+
+ prevPos := token.Pos{}
+ if index != 0 {
+ prevPos = o.List.Items[index-1].Pos()
+ }
+
+ // fmt.Println("DEBUG ----------------")
+ // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
+ // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
+ // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
+
+ if curPos.Line+1 == nextPos.Line {
+ aligned = append(aligned, item)
+ index++
+ continue
+ }
+
+ if curPos.Line-1 == prevPos.Line {
+ aligned = append(aligned, item)
+ index++
+
+ // finish if we have a new line or comment next. This happens
+ // if the next item is not adjacent
+ if curPos.Line+1 != nextPos.Line {
+ break
+ }
+ continue
+ }
+
+ break
+ }
+
+ // put newlines if the items are between other non-aligned items.
+ // newlines are also added if there is a standalone comment already, so
+ // check that too
+ if !commented && index != len(aligned) {
+ buf.WriteByte(newline)
+ }
+
+ if len(aligned) >= 1 {
+ p.prev = aligned[len(aligned)-1].Pos()
+
+ items := p.alignedItems(aligned)
+ buf.Write(p.indent(items))
+ } else {
+ p.prev = o.List.Items[index].Pos()
+
+ buf.Write(p.indent(p.objectItem(o.List.Items[index])))
+ index++
+ }
+
+ buf.WriteByte(newline)
+ }
+
+ buf.WriteString("}")
+ return buf.Bytes()
+}
+
+func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
+ var buf bytes.Buffer
+
+ // find the longest key and value length, needed for alignment
+ var longestKeyLen int // longest key length
+ var longestValLen int // longest value length
+ for _, item := range items {
+ key := len(item.Keys[0].Token.Text)
+ val := len(p.output(item.Val))
+
+ if key > longestKeyLen {
+ longestKeyLen = key
+ }
+
+ if val > longestValLen {
+ longestValLen = val
+ }
+ }
+
+ for i, item := range items {
+ if item.LeadComment != nil {
+ for _, comment := range item.LeadComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ for i, k := range item.Keys {
+ keyLen := len(k.Token.Text)
+ buf.WriteString(k.Token.Text)
+ for i := 0; i < longestKeyLen-keyLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ // reach end of key
+ if i == len(item.Keys)-1 && len(item.Keys) == 1 {
+ buf.WriteString("=")
+ buf.WriteByte(blank)
+ }
+ }
+
+ val := p.output(item.Val)
+ valLen := len(val)
+ buf.Write(val)
+
+ if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
+ for i := 0; i < longestValLen-valLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range item.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ // do not print for the last item
+ if i != len(items)-1 {
+ buf.WriteByte(newline)
+ }
+ }
+
+ return buf.Bytes()
+}
+
+// list returns the printable HCL form of a list type.
+func (p *printer) list(l *ast.ListType) []byte {
+ var buf bytes.Buffer
+ buf.WriteString("[")
+
+ var longestLine int
+ for _, item := range l.List {
+ // for now we assume that the list only contains literal types
+ if lit, ok := item.(*ast.LiteralType); ok {
+ lineLen := len(lit.Token.Text)
+ if lineLen > longestLine {
+ longestLine = lineLen
+ }
+ }
+ }
+
+ insertSpaceBeforeItem := false
+ for i, item := range l.List {
+ if item.Pos().Line != l.Lbrack.Line {
+ // multiline list, add newline before we add each item
+ buf.WriteByte(newline)
+ insertSpaceBeforeItem = false
+ // also indent each line
+ val := p.output(item)
+ curLen := len(val)
+ buf.Write(p.indent(val))
+ buf.WriteString(",")
+
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
+ // if the next item doesn't have any comments, do not align
+ buf.WriteByte(blank) // align one space
+ for i := 0; i < longestLine-curLen; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range lit.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ if i == len(l.List)-1 {
+ buf.WriteByte(newline)
+ }
+ } else {
+ if insertSpaceBeforeItem {
+ buf.WriteByte(blank)
+ insertSpaceBeforeItem = false
+ }
+ buf.Write(p.output(item))
+ if i != len(l.List)-1 {
+ buf.WriteString(",")
+ insertSpaceBeforeItem = true
+ }
+ }
+
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// indent prefixes every non-empty line of the given buffer with the configured indentation
+func (p *printer) indent(buf []byte) []byte {
+ var prefix []byte
+ if p.cfg.SpacesWidth != 0 {
+ for i := 0; i < p.cfg.SpacesWidth; i++ {
+ prefix = append(prefix, blank)
+ }
+ } else {
+ prefix = []byte{tab}
+ }
+
+ var res []byte
+ bol := true
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, prefix...)
+ }
+
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// unindent removes all the indentation from the tombstoned lines
+func (p *printer) unindent(buf []byte) []byte {
+ var res []byte
+ for i := 0; i < len(buf); i++ {
+ skip := len(buf)-i <= len(unindent)
+ if !skip {
+ skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
+ }
+ if skip {
+ res = append(res, buf[i])
+ continue
+ }
+
+ // We have a marker. we have to backtrace here and clean out
+ // any whitespace ahead of our tombstone up to a \n
+ for j := len(res) - 1; j >= 0; j-- {
+ if res[j] == '\n' {
+ break
+ }
+
+ res = res[:j]
+ }
+
+ // Skip the entire unindent marker
+ i += len(unindent) - 1
+ }
+
+ return res
+}
+
+// heredocIndent marks the second and all subsequent lines as unindentable
+func (p *printer) heredocIndent(buf []byte) []byte {
+ var res []byte
+ bol := false
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, unindent...)
+ }
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+func lines(txt string) int {
+ endline := 1
+ for i := 0; i < len(txt); i++ {
+ if txt[i] == '\n' {
+ endline++
+ }
+ }
+ return endline
+}
+
+// ----------------------------------------------------------------------------
+// Tracing support
+
+func (p *printer) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ i := 2 * p.indentTrace
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *printer, msg string) *printer {
+ p.printTrace(msg, "(")
+ p.indentTrace++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *printer) {
+ p.indentTrace--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
new file mode 100644
index 0000000..a296fc8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
@@ -0,0 +1,67 @@
+// Package printer implements printing of AST nodes to HCL format.
+package printer
+
+import (
+ "bytes"
+ "io"
+ "text/tabwriter"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+)
+
+var DefaultConfig = Config{
+ SpacesWidth: 2,
+}
+
+// A Config node controls the output of Fprint.
+type Config struct {
+ SpacesWidth int // if set, it will use spaces instead of tabs for alignment
+}
+
+func (c *Config) Fprint(output io.Writer, node ast.Node) error {
+ p := &printer{
+ cfg: *c,
+ comments: make([]*ast.CommentGroup, 0),
+ standaloneComments: make([]*ast.CommentGroup, 0),
+ // enableTrace: true,
+ }
+
+ p.collectComments(node)
+
+ if _, err := output.Write(p.unindent(p.output(node))); err != nil {
+ return err
+ }
+
+ // flush tabwriter, if any
+ var err error
+ if tw, _ := output.(*tabwriter.Writer); tw != nil {
+ err = tw.Flush()
+ }
+
+ return err
+}
+
+// Fprint "pretty-prints" an HCL node to output
+// It calls Config.Fprint with default settings.
+func Fprint(output io.Writer, node ast.Node) error {
+ return DefaultConfig.Fprint(output, node)
+}
+
+// Format formats src HCL and returns the result.
+func Format(src []byte) ([]byte, error) {
+ node, err := parser.Parse(src)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ if err := DefaultConfig.Fprint(&buf, node); err != nil {
+ return nil, err
+ }
+
+ // Add trailing newline to result
+ buf.WriteString("\n")
+
+ return buf.Bytes(), nil
+}
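
A sketch of the Format entry point defined above, round-tripping a small illustrative input the same way the golden-file tests below do.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	src := []byte("foo=\"bar\"\n")

	// Format parses, pretty-prints, and appends a trailing newline.
	out, err := printer.Format(src)
	if err != nil {
		log.Fatalf("format: %s", err)
	}

	fmt.Print(string(out))
}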
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
new file mode 100644
index 0000000..abb22a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
@@ -0,0 +1,140 @@
+// +build !windows
+// TODO(jen20): These need fixing on Windows but printer is not used right now
+// and red CI is making it harder to process other bugs, so ignore until
+// we get around to fixing them.
+
+package printer
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+
+ "github.com/hashicorp/hcl/hcl/parser"
+)
+
+var update = flag.Bool("update", false, "update golden files")
+
+const (
+ dataDir = "testdata"
+)
+
+type entry struct {
+ source, golden string
+}
+
+// Use go test -update to create/update the respective golden files.
+var data = []entry{
+ {"complexhcl.input", "complexhcl.golden"},
+ {"list.input", "list.golden"},
+ {"comment.input", "comment.golden"},
+ {"comment_aligned.input", "comment_aligned.golden"},
+ {"comment_standalone.input", "comment_standalone.golden"},
+ {"empty_block.input", "empty_block.golden"},
+ {"list_of_objects.input", "list_of_objects.golden"},
+}
+
+func TestFiles(t *testing.T) {
+ for _, e := range data {
+ source := filepath.Join(dataDir, e.source)
+ golden := filepath.Join(dataDir, e.golden)
+ check(t, source, golden)
+ }
+}
+
+func check(t *testing.T, source, golden string) {
+ src, err := ioutil.ReadFile(source)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ res, err := format(src)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // update golden files if necessary
+ if *update {
+ if err := ioutil.WriteFile(golden, res, 0644); err != nil {
+ t.Error(err)
+ }
+ return
+ }
+
+ // get golden
+ gld, err := ioutil.ReadFile(golden)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // formatted source and golden must be the same
+ if err := diff(source, golden, res, gld); err != nil {
+ t.Error(err)
+ return
+ }
+}
+
+// diff compares a and b.
+func diff(aname, bname string, a, b []byte) error {
+ var buf bytes.Buffer // holding long error message
+
+ // compare lengths
+ if len(a) != len(b) {
+ fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b))
+ }
+
+ // compare contents
+ line := 1
+ offs := 1
+ for i := 0; i < len(a) && i < len(b); i++ {
+ ch := a[i]
+ if ch != b[i] {
+ fmt.Fprintf(&buf, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs))
+ fmt.Fprintf(&buf, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs))
+ fmt.Fprintf(&buf, "\n\n")
+ break
+ }
+ if ch == '\n' {
+ line++
+ offs = i + 1
+ }
+ }
+
+ if buf.Len() > 0 {
+ return errors.New(buf.String())
+ }
+ return nil
+}
+
+// format parses src, prints the corresponding AST, verifies the resulting
+// src is syntactically correct, and returns the resulting src or an error
+// if any.
+func format(src []byte) ([]byte, error) {
+ formatted, err := Format(src)
+ if err != nil {
+ return nil, err
+ }
+
+ // make sure formatted output is syntactically correct
+ if _, err := parser.Parse(formatted); err != nil {
+ return nil, fmt.Errorf("parse: %s\n%s", err, src)
+ }
+
+ return formatted, nil
+}
+
+// lineAt returns the line in text starting at offset offs.
+func lineAt(text []byte, offs int) []byte {
+ i := offs
+ for i < len(text) && text[i] != '\n' {
+ i++
+ }
+ return text[offs:i]
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden
new file mode 100644
index 0000000..9d4b072
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden
@@ -0,0 +1,36 @@
+// A standalone comment is a comment which is not attached to any kind of node
+
+// This comes from Terraform, as a test
+variable "foo" {
+ # Standalone comment should be still here
+
+ default = "bar"
+ description = "bar" # yooo
+}
+
+/* This is a multi line standalone
+comment*/
+
+// fatih arslan
+/* This is a developer test
+account and a multine comment */
+developer = ["fatih", "arslan"] // fatih arslan
+
+# One line here
+numbers = [1, 2] // another line here
+
+# Another comment
+variable = {
+ description = "bar" # another yooo
+
+ foo {
+ # Nested standalone
+
+ bar = "fatih"
+ }
+}
+
+// lead comment
+foo {
+ bar = "fatih" // line comment 2
+} // line comment 3
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input
new file mode 100644
index 0000000..57c37ac
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input
@@ -0,0 +1,37 @@
+// A standalone comment is a comment which is not attached to any kind of node
+
+ // This comes from Terraform, as a test
+variable "foo" {
+ # Standalone comment should be still here
+
+ default = "bar"
+ description = "bar" # yooo
+}
+
+/* This is a multi line standalone
+comment*/
+
+
+// fatih arslan
+/* This is a developer test
+account and a multine comment */
+developer = [ "fatih", "arslan"] // fatih arslan
+
+# One line here
+numbers = [1,2] // another line here
+
+ # Another comment
+variable = {
+ description = "bar" # another yooo
+ foo {
+ # Nested standalone
+
+ bar = "fatih"
+ }
+}
+
+ // lead comment
+foo {
+ bar = "fatih" // line comment 2
+} // line comment 3
+
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden
new file mode 100644
index 0000000..6ff2150
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden
@@ -0,0 +1,32 @@
+aligned {
+ # We have some aligned items below
+ foo = "fatih" # yoo1
+ default = "bar" # yoo2
+ bar = "bar and foo" # yoo3
+
+ default = {
+ bar = "example"
+ }
+
+ #deneme arslan
+ fatih = ["fatih"] # yoo4
+
+ #fatih arslan
+ fatiharslan = ["arslan"] // yoo5
+
+ default = {
+ bar = "example"
+ }
+
+ security_groups = [
+ "foo", # kenya 1
+ "${aws_security_group.firewall.foo}", # kenya 2
+ ]
+
+ security_groups2 = [
+ "foo", # kenya 1
+ "bar", # kenya 1.5
+ "${aws_security_group.firewall.foo}", # kenya 2
+ "foobar", # kenya 3
+ ]
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input
new file mode 100644
index 0000000..bd43ab1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input
@@ -0,0 +1,28 @@
+aligned {
+# We have some aligned items below
+ foo = "fatih" # yoo1
+ default = "bar" # yoo2
+ bar = "bar and foo" # yoo3
+ default = {
+ bar = "example"
+ }
+ #deneme arslan
+ fatih = ["fatih"] # yoo4
+ #fatih arslan
+ fatiharslan = ["arslan"] // yoo5
+ default = {
+ bar = "example"
+ }
+
+security_groups = [
+ "foo", # kenya 1
+ "${aws_security_group.firewall.foo}", # kenya 2
+]
+
+security_groups2 = [
+ "foo", # kenya 1
+ "bar", # kenya 1.5
+ "${aws_security_group.firewall.foo}", # kenya 2
+ "foobar", # kenya 3
+]
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden
new file mode 100644
index 0000000..3236d9e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden
@@ -0,0 +1,17 @@
+// A standalone comment
+
+aligned {
+ # Standalone 1
+
+ a = "bar" # yoo1
+ default = "bar" # yoo2
+
+ # Standalone 2
+}
+
+# Standalone 3
+
+numbers = [1, 2] // another line here
+
+# Standalone 4
+
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input
new file mode 100644
index 0000000..4436cb1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input
@@ -0,0 +1,16 @@
+// A standalone comment
+
+aligned {
+ # Standalone 1
+
+ a = "bar" # yoo1
+ default = "bar" # yoo2
+
+ # Standalone 2
+}
+
+ # Standalone 3
+
+numbers = [1,2] // another line here
+
+ # Standalone 4
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden
new file mode 100644
index 0000000..198c32d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden
@@ -0,0 +1,54 @@
+variable "foo" {
+ default = "bar"
+ description = "bar"
+}
+
+developer = ["fatih", "arslan"]
+
+provider "aws" {
+ access_key = "foo"
+ secret_key = "bar"
+}
+
+provider "do" {
+ api_key = "${var.foo}"
+}
+
+resource "aws_security_group" "firewall" {
+ count = 5
+}
+
+resource aws_instance "web" {
+ ami = "${var.foo}"
+
+ security_groups = [
+ "foo",
+ "${aws_security_group.firewall.foo}",
+ ]
+
+ network_interface {
+ device_index = 0
+ description = "Main network interface"
+ }
+
+ network_interface = {
+ device_index = 1
+
+ description = <<EOF
+Main interface
+EOF
+ t.Errorf("ErrorCount = %d, want > 0 for %q", s.ErrorCount, src)
+ }
+}
+
+func testTokenList(t *testing.T, tokenList []tokenPair) {
+ // create artificial source code
+ buf := new(bytes.Buffer)
+ for _, ident := range tokenList {
+ fmt.Fprintf(buf, "%s\n", ident.text)
+ }
+
+ s := New(buf.Bytes())
+ for _, ident := range tokenList {
+ tok := s.Scan()
+ if tok.Type != ident.tok {
+ t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
+ }
+
+ if tok.Text != ident.text {
+ t.Errorf("text = %q want %q", tok.String(), ident.text)
+ }
+
+ }
+}
+
+func countNewlines(s string) int {
+ n := 0
+ for _, ch := range s {
+ if ch == '\n' {
+ n++
+ }
+ }
+ return n
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go
new file mode 100644
index 0000000..af2d848
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go
@@ -0,0 +1,92 @@
+package strconv
+
+import "testing"
+
+type quoteTest struct {
+ in string
+ out string
+ ascii string
+}
+
+var quotetests = []quoteTest{
+ {"\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`, `"\a\b\f\r\n\t\v"`},
+ {"\\", `"\\"`, `"\\"`},
+ {"abc\xffdef", `"abc\xffdef"`, `"abc\xffdef"`},
+ {"\u263a", `"☺"`, `"\u263a"`},
+ {"\U0010ffff", `"\U0010ffff"`, `"\U0010ffff"`},
+ {"\x04", `"\x04"`, `"\x04"`},
+}
+
+type unQuoteTest struct {
+ in string
+ out string
+}
+
+var unquotetests = []unQuoteTest{
+ {`""`, ""},
+ {`"a"`, "a"},
+ {`"abc"`, "abc"},
+ {`"☺"`, "☺"},
+ {`"hello world"`, "hello world"},
+ {`"\xFF"`, "\xFF"},
+ {`"\377"`, "\377"},
+ {`"\u1234"`, "\u1234"},
+ {`"\U00010111"`, "\U00010111"},
+ {`"\U0001011111"`, "\U0001011111"},
+ {`"\a\b\f\n\r\t\v\\\""`, "\a\b\f\n\r\t\v\\\""},
+ {`"'"`, "'"},
+ {`"${file("foo")}"`, `${file("foo")}`},
+ {`"${file("\"foo\"")}"`, `${file("\"foo\"")}`},
+ {`"echo ${var.region}${element(split(",",var.zones),0)}"`,
+ `echo ${var.region}${element(split(",",var.zones),0)}`},
+ {`"${HH\\:mm\\:ss}"`, `${HH\:mm\:ss}`},
+}
+
+var misquoted = []string{
+ ``,
+ `"`,
+ `"a`,
+ `"'`,
+ `b"`,
+ `"\"`,
+ `"\9"`,
+ `"\19"`,
+ `"\129"`,
+ `'\'`,
+ `'\9'`,
+ `'\19'`,
+ `'\129'`,
+ `'ab'`,
+ `"\x1!"`,
+ `"\U12345678"`,
+ `"\z"`,
+ "`",
+ "`xxx",
+ "`\"",
+ `"\'"`,
+ `'\"'`,
+ "'\n'",
+ `"${"`,
+ `"${foo{}"`,
+}
+
+func TestUnquote(t *testing.T) {
+ for _, tt := range unquotetests {
+ if out, err := Unquote(tt.in); err != nil || out != tt.out {
+ t.Errorf("Unquote(%#q) = %q, %v want %q, nil", tt.in, out, err, tt.out)
+ }
+ }
+
+ // run the quote tests too, backward
+ for _, tt := range quotetests {
+ if in, err := Unquote(tt.out); in != tt.in {
+ t.Errorf("Unquote(%#q) = %q, %v, want %q, nil", tt.out, in, err, tt.in)
+ }
+ }
+
+ for _, s := range misquoted {
+ if out, err := Unquote(s); out != "" || err != ErrSyntax {
+ t.Errorf("Unquote(%#q) = %q, %v want %q, %v", s, out, err, "", ErrSyntax)
+ }
+ }
+}
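
A minimal sketch (editorial, not part of the diff) of what the table above asserts: unlike the standard library's `strconv.Unquote`, the vendored HCL variant passes interpolation sequences through verbatim. The import alias is illustrative.

```go
package main

import (
	"fmt"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	// Interpolations survive unquoting as literal text.
	s, err := hclstrconv.Unquote(`"${file("foo")}"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // ${file("foo")}
}
```
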
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl
new file mode 100644
index 0000000..78c2675
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl
@@ -0,0 +1,4 @@
+foo = [
+ "1",
+ "2", # comment
+]
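
As a rough illustration of how this fixture decodes (the struct and field tag below are hypothetical, not taken from the diff), comments inside lists are simply skipped by the parser:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	var out struct {
		Foo []string `hcl:"foo"`
	}
	src := `
foo = [
  "1",
  "2", # comment
]
`
	if err := hcl.Decode(&out, src); err != nil {
		panic(err)
	}
	fmt.Println(out.Foo) // [1 2]
}
```
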
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl
new file mode 100644
index 0000000..eb5a99a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl
@@ -0,0 +1,6 @@
+resource = [{
+ "foo": {
+ "bar": {},
+ "baz": [1, 2, "foo"],
+ }
+}]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl
new file mode 100644
index 0000000..1ff7f29
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl
@@ -0,0 +1,15 @@
+// Foo
+
+/* Bar */
+
+/*
+/*
+Baz
+*/
+
+# Another
+
+# Multiple
+# Lines
+
+foo = "bar"
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl
new file mode 100644
index 0000000..fec5601
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl
@@ -0,0 +1 @@
+# Hello
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl
new file mode 100644
index 0000000..cccb5b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl
@@ -0,0 +1,42 @@
+// This comes from Terraform, as a test
+variable "foo" {
+ default = "bar"
+ description = "bar"
+}
+
+provider "aws" {
+ access_key = "foo"
+ secret_key = "bar"
+}
+
+provider "do" {
+ api_key = "${var.foo}"
+}
+
+resource "aws_security_group" "firewall" {
+ count = 5
+}
+
+resource aws_instance "web" {
+ ami = "${var.foo}"
+ security_groups = [
+ "foo",
+ "${aws_security_group.firewall.foo}"
+ ]
+
+ network_interface {
+ device_index = 0
+ description = "Main network interface"
+ }
+}
+
+resource "aws_instance" "db" {
+ security_groups = "${aws_security_group.firewall.*.id}"
+ VPC = "foo"
+
+ depends_on = ["aws_instance.web"]
+}
+
+output "web_ip" {
+ value = "${aws_instance.web.private_ip}"
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl
new file mode 100644
index 0000000..0007aaf
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl
@@ -0,0 +1 @@
+foo.bar = "baz"
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl
new file mode 100644
index 0000000..e69de29
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl
new file mode 100644
index 0000000..059d4ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl
@@ -0,0 +1 @@
+foo = [1, 2, "foo"]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl
new file mode 100644
index 0000000..50f4218
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl
@@ -0,0 +1 @@
+foo = [1, 2, "foo",]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl
new file mode 100644
index 0000000..029c54b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl
@@ -0,0 +1,2 @@
+foo = "bar"
+key = 7
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl
new file mode 100644
index 0000000..e9f77ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl
@@ -0,0 +1,3 @@
+default = {
+ "eu-west-1": "ami-b1cf19c6",
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl
new file mode 100644
index 0000000..92592fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl
@@ -0,0 +1,5 @@
+// This is a test structure for the lexer
+foo bar "baz" {
+ key = 7
+ foo = "bar"
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl
new file mode 100644
index 0000000..7229a1f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl
@@ -0,0 +1,5 @@
+foo {
+ value = 7
+ "value" = 8
+ "complex::value" = 9
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl
new file mode 100644
index 0000000..4d156dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl
@@ -0,0 +1 @@
+resource "foo" "bar" {}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl
new file mode 100644
index 0000000..cf2747e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl
@@ -0,0 +1,7 @@
+foo = "bar"
+bar = 7
+baz = [1,2,3]
+foo = -12
+bar = 3.14159
+foo = true
+bar = false
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go b/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go
new file mode 100644
index 0000000..932951c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go
@@ -0,0 +1,63 @@
+package token
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestTypeString(t *testing.T) {
+ var tokens = []struct {
+ tt Type
+ str string
+ }{
+ {ILLEGAL, "ILLEGAL"},
+ {EOF, "EOF"},
+ {COMMENT, "COMMENT"},
+ {IDENT, "IDENT"},
+ {NUMBER, "NUMBER"},
+ {FLOAT, "FLOAT"},
+ {BOOL, "BOOL"},
+ {STRING, "STRING"},
+ {HEREDOC, "HEREDOC"},
+ {LBRACK, "LBRACK"},
+ {LBRACE, "LBRACE"},
+ {COMMA, "COMMA"},
+ {PERIOD, "PERIOD"},
+ {RBRACK, "RBRACK"},
+ {RBRACE, "RBRACE"},
+ {ASSIGN, "ASSIGN"},
+ {ADD, "ADD"},
+ {SUB, "SUB"},
+ }
+
+ for _, token := range tokens {
+ if token.tt.String() != token.str {
+ t.Errorf("want: %q got:%q\n", token.str, token.tt)
+ }
+ }
+
+}
+
+func TestTokenValue(t *testing.T) {
+ var tokens = []struct {
+ tt Token
+ v interface{}
+ }{
+ {Token{Type: BOOL, Text: `true`}, true},
+ {Token{Type: BOOL, Text: `false`}, false},
+ {Token{Type: FLOAT, Text: `3.14`}, float64(3.14)},
+ {Token{Type: NUMBER, Text: `42`}, int64(42)},
+ {Token{Type: IDENT, Text: `foo`}, "foo"},
+ {Token{Type: STRING, Text: `"foo"`}, "foo"},
+ {Token{Type: STRING, Text: `"foo\nbar"`}, "foo\nbar"},
+ {Token{Type: STRING, Text: `"${file("foo")}"`}, `${file("foo")}`},
+ {Token{Type: HEREDOC, Text: "<<EOF\nfoo\nbar\nEOF"}, "foo\nbar"},
+ }
+
+ for _, token := range tokens {
+ if val := token.tt.Value(); !reflect.DeepEqual(val, token.v) {
+ t.Errorf("want: %v got:%v\n", token.v, val)
+ }
+ }
+}
+
+func testTokenList(t *testing.T, tokenList []tokenPair) {
+ // create artificial source code
+ buf := new(bytes.Buffer)
+ for _, ident := range tokenList {
+ fmt.Fprintf(buf, "%s\n", ident.text)
+ }
+
+ s := New(buf.Bytes())
+ for _, ident := range tokenList {
+ tok := s.Scan()
+ if tok.Type != ident.tok {
+ t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
+ }
+
+ if tok.Text != ident.text {
+ t.Errorf("text = %q want %q", tok.String(), ident.text)
+ }
+
+ }
+}
+
+func countNewlines(s string) int {
+ n := 0
+ for _, ch := range s {
+ if ch == '\n' {
+ n++
+ }
+ }
+ return n
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json
new file mode 100644
index 0000000..e320f17
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json
@@ -0,0 +1,4 @@
+{
+ "foo": [1, 2, "bar"],
+ "bar": "baz"
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json
new file mode 100644
index 0000000..b54bde9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json
@@ -0,0 +1,3 @@
+{
+ "foo": "bar"
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json
new file mode 100644
index 0000000..72168a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json
@@ -0,0 +1,5 @@
+{
+ "foo": {
+ "bar": [1,2]
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json
new file mode 100644
index 0000000..9a142a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json
@@ -0,0 +1,10 @@
+{
+ "foo": "bar",
+ "bar": 7,
+ "baz": [1,2,3],
+ "foo": -12,
+ "bar": 3.14159,
+ "foo": true,
+ "bar": false,
+ "foo": null
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token_test.go b/vendor/github.com/hashicorp/hcl/json/token/token_test.go
new file mode 100644
index 0000000..a83fdd5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token_test.go
@@ -0,0 +1,34 @@
+package token
+
+import (
+ "testing"
+)
+
+func TestTypeString(t *testing.T) {
+ var tokens = []struct {
+ tt Type
+ str string
+ }{
+ {ILLEGAL, "ILLEGAL"},
+ {EOF, "EOF"},
+ {NUMBER, "NUMBER"},
+ {FLOAT, "FLOAT"},
+ {BOOL, "BOOL"},
+ {STRING, "STRING"},
+ {NULL, "NULL"},
+ {LBRACK, "LBRACK"},
+ {LBRACE, "LBRACE"},
+ {COMMA, "COMMA"},
+ {PERIOD, "PERIOD"},
+ {RBRACK, "RBRACK"},
+ {RBRACE, "RBRACE"},
+ }
+
+ for _, token := range tokens {
+ if token.tt.String() != token.str {
+ t.Errorf("want: %q got:%q\n", token.str, token.tt)
+
+ }
+ }
+
+}
diff --git a/vendor/github.com/hashicorp/hcl/lex_test.go b/vendor/github.com/hashicorp/hcl/lex_test.go
new file mode 100644
index 0000000..8062764
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/lex_test.go
@@ -0,0 +1,37 @@
+package hcl
+
+import (
+ "testing"
+)
+
+func TestLexMode(t *testing.T) {
+ cases := []struct {
+ Input string
+ Mode lexModeValue
+ }{
+ {
+ "",
+ lexModeHcl,
+ },
+ {
+ "foo",
+ lexModeHcl,
+ },
+ {
+ "{}",
+ lexModeJson,
+ },
+ {
+ " {}",
+ lexModeJson,
+ },
+ }
+
+ for i, tc := range cases {
+ actual := lexMode([]byte(tc.Input))
+
+ if actual != tc.Mode {
+ t.Fatalf("%d: %#v", i, actual)
+ }
+ }
+}
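
The test above exercises the internal `lexMode` detector. A sketch of the user-visible effect, assuming only the public `hcl.Decode` entry point, is that one call accepts both syntaxes: input whose first non-space byte is `{` is parsed as JSON, everything else as native HCL.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	for _, src := range []string{`foo = "bar"`, `{"foo": "bar"}`} {
		var out map[string]interface{}
		if err := hcl.Decode(&out, src); err != nil {
			panic(err)
		}
		fmt.Println(out["foo"]) // bar, for both inputs
	}
}
```
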
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/assign_deep.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/assign_deep.hcl
new file mode 100644
index 0000000..dd3151c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/assign_deep.hcl
@@ -0,0 +1,5 @@
+resource = [{
+ foo = [{
+ bar = {}
+ }]
+}]
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl
new file mode 100644
index 0000000..9499944
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl
@@ -0,0 +1,2 @@
+foo = "bar"
+bar = "${file("bing/bong.txt")}"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json
new file mode 100644
index 0000000..7bdddc8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json
@@ -0,0 +1,4 @@
+{
+ "foo": "bar",
+ "bar": "${file(\"bing/bong.txt\")}"
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl
new file mode 100644
index 0000000..4e415da
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl
@@ -0,0 +1 @@
+count = "3"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl
new file mode 100644
index 0000000..363697b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl
@@ -0,0 +1,3 @@
+foo="bar"
+bar="${file("bing/bong.txt")}"
+foo-bar="baz"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/block_assign.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/block_assign.hcl
new file mode 100644
index 0000000..ee8b06f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/block_assign.hcl
@@ -0,0 +1,2 @@
+environment = "aws" {
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl
new file mode 100644
index 0000000..5b185cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl
@@ -0,0 +1,15 @@
+key "" {
+ policy = "read"
+}
+
+key "foo/" {
+ policy = "write"
+}
+
+key "foo/bar/" {
+ policy = "read"
+}
+
+key "foo/bar/baz" {
+ policy = "deny"
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json
new file mode 100644
index 0000000..151864e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json
@@ -0,0 +1,19 @@
+{
+ "key": {
+ "": {
+ "policy": "read"
+ },
+
+ "foo/": {
+ "policy": "write"
+ },
+
+ "foo/bar/": {
+ "policy": "read"
+ },
+
+ "foo/bar/baz": {
+ "policy": "deny"
+ }
+ }
+}
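
A hedged sketch of consuming such a policy document with the vendored decoder; the `KeyPolicy` type and field names are invented for illustration and are not Vault's actual policy structs:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Hypothetical type for illustration only.
type KeyPolicy struct {
	Policy string `hcl:"policy"`
}

func main() {
	var out struct {
		Keys map[string]KeyPolicy `hcl:"key"`
	}
	src := `
key "foo/" {
  policy = "write"
}
`
	if err := hcl.Decode(&out, src); err != nil {
		panic(err)
	}
	fmt.Println(out.Keys["foo/"].Policy) // write
}
```
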
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl
new file mode 100644
index 0000000..52dcaa1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl
@@ -0,0 +1,10 @@
+variable "foo" {
+ default = "bar"
+ description = "bar"
+}
+
+variable "amis" {
+ default = {
+ east = "foo"
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json
new file mode 100644
index 0000000..49f921e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json
@@ -0,0 +1,14 @@
+{
+ "variable": {
+ "foo": {
+ "default": "bar",
+ "description": "bar"
+ },
+
+ "amis": {
+ "default": {
+ "east": "foo"
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl
new file mode 100644
index 0000000..5be1b23
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl
@@ -0,0 +1 @@
+resource "foo" {}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl
new file mode 100644
index 0000000..f818b15
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl
@@ -0,0 +1,6 @@
+foo = "bar\"baz\\n"
+bar = "new\nline"
+qux = "back\\slash"
+qax = "slash\\:colon"
+nested = "${HH\\:mm\\:ss}"
+nestedquotes = "${"\"stringwrappedinquotes\""}"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl
new file mode 100644
index 0000000..9bca551
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl
@@ -0,0 +1,2 @@
+foo = "bar"
+Key = 7
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl
new file mode 100644
index 0000000..eed44e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl
@@ -0,0 +1 @@
+a = 1.02
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/float.json b/vendor/github.com/hashicorp/hcl/test-fixtures/float.json
new file mode 100644
index 0000000..a9d1ab4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/float.json
@@ -0,0 +1,3 @@
+{
+ "a": 1.02
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate.json b/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate.json
new file mode 100644
index 0000000..cad0151
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate.json
@@ -0,0 +1,3 @@
+{
+ "default": "${replace(\"europe-west\", \"-\", \" \")}"
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_maps.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_maps.hcl
new file mode 100644
index 0000000..985a33b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_maps.hcl
@@ -0,0 +1,4 @@
+foo = [
+ {somekey1 = "someval1"},
+ {somekey2 = "someval2", someextrakey = "someextraval"},
+]
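
For clarity, a small sketch (names illustrative) of the decoding behavior this fixture pins down: a list of objects mapping onto `[]map[string]string`.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	var out struct {
		Foo []map[string]string `hcl:"foo"`
	}
	src := `
foo = [
  {somekey1 = "someval1"},
  {somekey2 = "someval2", someextrakey = "someextraval"},
]
`
	if err := hcl.Decode(&out, src); err != nil {
		panic(err)
	}
	fmt.Println(len(out.Foo), out.Foo[1]["someextrakey"]) // 2 someextraval
}
```
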
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl
new file mode 100644
index 0000000..f883bd7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl
@@ -0,0 +1,4 @@
+foo = <<EOF
+bar
+baz
+EOF
+ * secret/pki: The `sign-verbatim/<role>` endpoint honors the values of `generate_lease`, `no_store`, `ttl` and
+ `max_ttl` from the given role [GH-2593]
+ * secret/pki: Add role parameter `allow_glob_domains` that enables defining
+ names in `allowed_domains` containing `*` glob patterns [GH-2517]
+ * secret/pki: Update certificate storage to not use characters that are not
+ supported on some filesystems [GH-2575]
+ * storage/etcd3: Add `discovery_srv` option to query for SRV records to find
+ servers [GH-2521]
+ * storage/s3: Support `max_parallel` option to limit concurrent outstanding
+ requests [GH-2466]
+ * storage/s3: Use pooled transport for http client [GH-2481]
+ * storage/swift: Allow domain values for V3 authentication [GH-2554]
+
+BUG FIXES:
+
+ * api: Respect a configured path in Vault's address [GH-2588]
+ * auth/aws-ec2: New bounds added as criteria to allow role creation [GH-2600]
+ * auth/ldap: Don't lowercase groups attached to users [GH-2613]
+ * secret/mssql: Update mssql driver to support queries with colons [GH-2610]
+ * secret/pki: Don't lowercase O/OU values in certs [GH-2555]
+ * secret/pki: Don't attempt to validate IP SANs if none are provided [GH-2574]
+ * secret/ssh: Don't automatically lowercase principals in issued SSH certs
+ [GH-2591]
+ * storage/consul: Properly handle state events rather than timing out
+ [GH-2548]
+ * storage/etcd3: Ensure locks are released if client is improperly shut down
+ [GH-2526]
+
+## 0.7.0 (March 21st, 2017)
+
+SECURITY:
+
+ * Common name not being validated when `exclude_cn_from_sans` option used in
+ `pki` backend: When using a role in the `pki` backend that specified the
+ `exclude_cn_from_sans` option, the common name would not then be properly
+ validated against the role's constraints. This has been fixed. We recommend
+ any users of this feature to upgrade to 0.7 as soon as feasible.
+
+DEPRECATIONS/CHANGES:
+
+ * List Operations Always Use Trailing Slash: Any list operation, whether via
+ the `GET` or `LIST` HTTP verb, will now internally canonicalize the path to
+ have a trailing slash. This makes policy writing more predictable, as it
+ means requests will no longer succeed or fail depending on which client or
+ which HTTP verb is used. However, it also means that policies
+ allowing `list` capability must be carefully checked to ensure that they
+ contain a trailing slash; some policies may need to be split into multiple
+ stanzas to accommodate.
+ * PKI Defaults to Unleased Certificates: When issuing certificates from the
+ PKI backend, by default, no leases will be issued. If you want to manually
+ revoke a certificate, its serial number can be used with the `pki/revoke`
+ endpoint. Issuing leases is still possible by enabling the `generate_lease`
+ toggle in PKI role entries (this will default to `true` for upgrades, to
+ keep existing behavior), which will allow using lease IDs to revoke
+ certificates. For installations issuing large numbers of certificates (tens
+ to hundreds of thousands, or millions), this will significantly improve
+ Vault startup time since leases associated with these certificates will not
+ have to be loaded; however note that it also means that revocation of a
+ token used to issue certificates will no longer add these certificates to a
+ CRL. If this behavior is desired or needed, consider keeping leases enabled
+ and ensuring lifetimes are reasonable, and issue long-lived certificates via
+ a different role with leases disabled.
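
As a rough sketch of the manual revocation flow described in the PKI entry above, assuming the backend is mounted at `pki` and using a placeholder serial number:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// The serial number is a placeholder; in practice it comes from the
	// response returned when the certificate was issued.
	_, err = client.Logical().Write("pki/revoke", map[string]interface{}{
		"serial_number": "17:67:16:b0:b9:45:58:c0:3a:29:e3:cb",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
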
+
+FEATURES:
+
+ * **Replication (Enterprise)**: Vault Enterprise now has support for creating
+ a multi-datacenter replication set between clusters. The current replication
+ offering is based on an asynchronous primary/secondary (1:N) model that
+ replicates static data while keeping dynamic data (leases, tokens)
+ cluster-local, focusing on horizontal scaling for high-throughput and
+ high-fanout deployments.
+ * **Response Wrapping & Replication in the Vault Enterprise UI**: Vault
+ Enterprise UI now supports looking up and rotating response wrapping tokens,
+ as well as creating tokens with arbitrary values inside. It also now
+ supports replication functionality, enabling the configuration of a
+ replication set in the UI.
+ * **Expanded Access Control Policies**: Access control policies can now
+ specify allowed and denied parameters -- and, optionally, their values -- to
+ control what a client can and cannot submit during an API call. Policies can
+ also specify minimum/maximum response wrapping TTLs to both enforce the use
+ of response wrapping and control the duration of resultant wrapping tokens.
+ See the [policies concepts
+ page](https://www.vaultproject.io/docs/concepts/policies.html) for more
+ information.
+ * **SSH Backend As Certificate Authority**: The SSH backend can now be
+ configured to sign host and user certificates. Each mount of the backend
+ acts as an independent signing authority. The CA key pair can be configured
+ for each mount and the public key is accessible via an unauthenticated API
+ call; additionally, the backend can generate a public/private key pair for
+ you. We recommend using separate mounts for signing host and user
+ certificates.
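
A hedged sketch of the SSH CA signing call described in the last entry; the mount path `ssh`, the role name, and the key material are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Write("ssh/sign/my-role", map[string]interface{}{
		"public_key": "ssh-rsa AAAAB3Nza... user@host", // placeholder key
	})
	if err != nil {
		log.Fatal(err)
	}
	if secret != nil {
		fmt.Println(secret.Data["signed_key"])
	}
}
```
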
+
+IMPROVEMENTS:
+
+ * api/request: Passing username and password information in API request
+ [GH-2469]
+ * audit: Logging the token's use count with authentication response and
+ logging the remaining uses of the client token with request [GH-2437]
+ * auth/approle: Support for restricting the number of uses on the tokens
+ issued [GH-2435]
+ * auth/aws-ec2: AWS EC2 auth backend now supports constraints for VPC ID,
+ Subnet ID and Region [GH-2407]
+ * auth/ldap: Use the value of the `LOGNAME` or `USER` env vars for the
+ username if not explicitly set on the command line when authenticating
+ [GH-2154]
+ * audit: Support adding a configurable prefix (such as `@cee`) before each
+ line [GH-2359]
+ * core: Canonicalize list operations to use a trailing slash [GH-2390]
+ * core: Add option to disable caching on a per-mount level [GH-2455]
+ * core: Add ability to require valid client certs in listener config [GH-2457]
+ * physical/dynamodb: Implement a session timeout to avoid having to use
+ recovery mode in the case of an unclean shutdown, which makes HA much safer
+ [GH-2141]
+ * secret/pki: O (Organization) values can now be set to role-defined values
+ for issued/signed certificates [GH-2369]
+ * secret/pki: Certificates issued/signed from PKI backend do not generate
+ leases by default [GH-2403]
+ * secret/pki: When using DER format, still return the private key type
+ [GH-2405]
+ * secret/pki: Add an intermediate to the CA chain even if it lacks an
+ authority key ID [GH-2465]
+ * secret/pki: Add role option to use CSR SANs [GH-2489]
+ * secret/ssh: SSH backend as CA to sign user and host certificates [GH-2208]
+ * secret/ssh: Support reading of SSH CA public key from `config/ca` endpoint
+ and also return it when CA key pair is generated [GH-2483]
+
+BUG FIXES:
+
+ * audit: When auditing headers use case-insensitive comparisons [GH-2362]
+ * auth/aws-ec2: Return role period in seconds and not nanoseconds [GH-2374]
+ * auth/okta: Fix panic if user had no local groups and/or policies set
+ [GH-2367]
+ * command/server: Fix parsing of redirect address when port is not mentioned
+ [GH-2354]
+ * physical/postgresql: Fix listing returning incorrect results if there were
+ multiple levels of children [GH-2393]
+
+## 0.6.5 (February 7th, 2017)
+
+FEATURES:
+
+ * **Okta Authentication**: A new Okta authentication backend allows you to use
+ Okta usernames and passwords to authenticate to Vault. If provided with an
+ appropriate Okta API token, group membership can be queried to assign
+ policies; users and groups can be defined locally as well.
+ * **RADIUS Authentication**: A new RADIUS authentication backend allows using
+ a RADIUS server to authenticate to Vault. Policies can be configured for
+ specific users or for any authenticated user.
+ * **Exportable Transit Keys**: Keys in `transit` can now be marked as
+ `exportable` at creation time. This allows a properly ACL'd user to retrieve
+ the associated signing key, encryption key, or HMAC key. The `exportable`
+ value is returned on a key policy read and cannot be changed, so if a key is
+ marked `exportable` it will always be exportable, and if it is not it will
+ never be exportable.
+ * **Batch Transit Operations**: `encrypt`, `decrypt` and `rewrap` operations
+ in the transit backend now support processing multiple input items in one
+ call, returning the output of each item in the response.
+ * **Configurable Audited HTTP Headers**: You can now specify headers that you
+ want to have included in each audit entry, along with whether each header
+ should be HMAC'd or kept plaintext. This can be useful for adding additional
+ client or network metadata to the audit logs.
+ * **Transit Backend UI (Enterprise)**: Vault Enterprise UI now supports the transit
+ backend, allowing creation, viewing and editing of named keys as well as using
+ those keys to perform supported transit operations directly in the UI.
+ * **Socket Audit Backend** A new socket audit backend allows audit logs to be sent
+ through TCP, UDP, or UNIX Sockets.
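
To make the socket audit entry concrete, a sketch using the Go API; the address, socket type, and mount path are illustrative values, not prescribed by the changelog:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Mount an audit backend of type "socket"; the options shown are
	// illustrative values for a TCP listener.
	err = client.Sys().EnableAudit("socket", "socket", "audit over tcp", map[string]string{
		"address":     "127.0.0.1:9090",
		"socket_type": "tcp",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
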
+
+IMPROVEMENTS:
+
+ * auth/aws-ec2: Add support for cross-account auth using STS [GH-2148]
+ * auth/aws-ec2: Support issuing periodic tokens [GH-2324]
+ * auth/github: Support listing teams and users [GH-2261]
+ * auth/ldap: Support adding policies to local users directly, in addition to
+ local groups [GH-2152]
+ * command/server: Add ability to select and prefer server cipher suites
+ [GH-2293]
+ * core: Add a nonce to unseal operations as a check (useful mostly for
+ support, not as a security principle) [GH-2276]
+ * duo: Added ability to supply extra context to Duo pushes [GH-2118]
+ * physical/consul: Add option for setting consistency mode on Consul gets
+ [GH-2282]
+ * physical/etcd: Full v3 API support; code will autodetect which API version
+ to use. The v3 code path is significantly less complicated and may be much
+ more stable. [GH-2168]
+ * secret/pki: Allow specifying OU entries in generated certificate subjects
+ [GH-2251]
+ * secret mount ui (Enterprise): the secret mount list now shows all mounted
+ backends even if the UI cannot browse them. Additional backends can now be
+ mounted from the UI as well.
+
+BUG FIXES:
+
+ * auth/token: Fix regression in 0.6.4 where using token store roles as a
+ blacklist (with only `disallowed_policies` set) would not work in most
+ circumstances [GH-2286]
+ * physical/s3: Page responses in client so list doesn't truncate [GH-2224]
+ * secret/cassandra: Stop a connection leak that could occur on active node
+ failover [GH-2313]
+ * secret/pki: When using `sign-verbatim`, don't require a role and use the
+ CSR's common name [GH-2243]
+
+## 0.6.4 (December 16, 2016)
+
+SECURITY:
+
+Further details about these security issues can be found in the 0.6.4 upgrade
+guide.
+
+ * `default` Policy Privilege Escalation: If a parent token did not have the
+ `default` policy attached to its token, it could still create children with
+ the `default` policy. This is no longer allowed (unless the parent has
+ `sudo` capability for the creation path). In most cases this is low severity
+ since the access grants in the `default` policy are meant to be access
+ grants that are acceptable for all tokens to have.
+ * Leases Not Expired When Limited Use Token Runs Out of Uses: When using
+ limited-use tokens to create leased secrets, if the limited-use token was
+ revoked due to running out of uses (rather than due to TTL expiration or
+ explicit revocation) it would fail to revoke the leased secrets. These
+ secrets would still be revoked when their TTL expired, limiting the severity
+ of this issue. An endpoint has been added (`auth/token/tidy`) that can
+ perform housekeeping tasks on the token store; one of its tasks can detect
+ this situation and revoke the associated leases.
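
A minimal sketch of invoking the housekeeping endpoint mentioned above via the Go API, assuming the token store is mounted at the default `auth/token` path:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Kick off token store housekeeping, which among other things can
	// revoke leases left behind by use-count-exhausted tokens.
	if _, err := client.Logical().Write("auth/token/tidy", map[string]interface{}{}); err != nil {
		log.Fatal(err)
	}
}
```
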
+
+FEATURES:
+
+ * **Policy UI (Enterprise)**: Vault Enterprise UI now supports viewing,
+ creating, and editing policies.
+
+IMPROVEMENTS:
+
+ * http: Vault now sets a `no-store` cache control header to make it more
+ secure in setups that are not end-to-end encrypted [GH-2183]
+
+BUG FIXES:
+
+ * auth/ldap: Don't panic if dialing returns an error and starttls is enabled;
+ instead, return the error [GH-2188]
+ * ui (Enterprise): Submitting an unseal key now properly resets the
+ form so a browser refresh isn't required to continue.
+
+## 0.6.3 (December 6, 2016)
+
+DEPRECATIONS/CHANGES:
+
+ * Request size limitation: A maximum request size of 32MB is imposed to
+ prevent a denial of service attack with arbitrarily large requests [GH-2108]
+ * LDAP denies passwordless binds by default: In new LDAP mounts, or when
+ existing LDAP mounts are rewritten, passwordless binds will be denied by
+ default. The new `deny_null_bind` parameter can be set to `false` to allow
+ these. [GH-2103]
+ * Any audit backend activated satisfies conditions: Previously, when a new
+ Vault node was taking over service in an HA cluster, all audit backends were
+ required to be loaded successfully to take over active duty. This behavior
+ now matches the behavior of the audit logging system itself: at least one
+ audit backend must successfully be loaded. The server log contains an error
+ when this occurs. This helps keep a Vault HA cluster working when there is a
+ misconfiguration on a standby node. [GH-2083]
+
+FEATURES:
+
+ * **Web UI (Enterprise)**: Vault Enterprise now contains a built-in web UI
+ that offers access to a number of features, including init/unsealing/sealing,
+ authentication via userpass or LDAP, and K/V reading/writing. The capability
+ set of the UI will be expanding rapidly in further releases. To enable it,
+ set `ui = true` in the top level of Vault's configuration file and point a
+ web browser at your Vault address.
+ * **Google Cloud Storage Physical Backend**: You can now use GCS for storing
+ Vault data [GH-2099]
+
+IMPROVEMENTS:
+
+ * auth/github: Policies can now be assigned to users as well as to teams
+ [GH-2079]
+ * cli: Set the number of retries on 500 down to 0 by default (no retrying). It
+ can be very confusing to users when there is a pause while the retries
+ happen if they haven't explicitly set it. With request forwarding the need
+ for this is lessened anyway. [GH-2093]
+ * core: Response wrapping is now allowed to be specified by backend responses
+ (requires backends gaining support) [GH-2088]
+ * physical/consul: When announcing service, use the scheme of the Vault server
+ rather than the Consul client [GH-2146]
+ * secret/consul: Added listing functionality to roles [GH-2065]
+ * secret/postgresql: Added `revocation_sql` parameter on the role endpoint to
+ enable customization of user revocation SQL statements [GH-2033]
+ * secret/transit: Add listing of keys [GH-1987]
+
+BUG FIXES:
+
+ * api/unwrap, command/unwrap: Increase compatibility of `unwrap` command with
+ Vault 0.6.1 and older [GH-2014]
+ * api/unwrap, command/unwrap: Fix error when no client token exists [GH-2077]
+ * auth/approle: Creating the index for the role_id properly [GH-2004]
+ * auth/aws-ec2: Handle the case of multiple upgrade attempts when setting the
+ instance-profile ARN [GH-2035]
+ * auth/ldap: Avoid leaking connections on login [GH-2130]
+ * command/path-help: Use the actual error generated by Vault rather than
+ always using 500 when there is a path help error [GH-2153]
+ * command/ssh: Use temporary file for identity and ensure its deletion before
+ the command returns [GH-2016]
+ * cli: Fix error printing values with `-field` if the values contained
+ formatting directives [GH-2109]
+ * command/server: Don't say mlock is supported on OSX when it isn't. [GH-2120]
+ * core: Fix bug where a failure to come up as active node (e.g. if an audit
+ backend failed) could lead to deadlock [GH-2083]
+ * physical/mysql: Fix potential crash during setup due to a query failure
+ [GH-2105]
+ * secret/consul: Fix panic on user error [GH-2145]
+
+## 0.6.2 (October 5, 2016)
+
+DEPRECATIONS/CHANGES:
+
+ * Convergent Encryption v2: New keys in `transit` using convergent mode will
+ use a new nonce derivation mechanism rather than require the user to supply
+ a nonce. While not explicitly increasing security, it minimizes the
+ likelihood that a user will use the mode improperly and impact the security
+ of their keys. Keys in convergent mode that were created in v0.6.1 will
+ continue to work with the same mechanism (user-supplied nonce).
+ * `etcd` HA off by default: Following in the footsteps of `dynamodb`, the
+ `etcd` storage backend now requires that `ha_enabled` be explicitly
+ specified in the configuration file. The backend currently has known broken
+ HA behavior, so this flag discourages use by default without explicitly
+ enabling it. If you are using this functionality, when upgrading, you should
+ set `ha_enabled` to `"true"` *before* starting the new versions of Vault.
+ * Default/Max lease/token TTLs are now 32 days: In previous versions of Vault
+ the default was 30 days, but moving it to 32 days allows some operations
+ (e.g. reauthenticating, renewing, etc.) to be performed via a monthly cron
+ job.
+ * AppRole Secret ID endpoints changed: Secret ID and Secret ID accessors are
+ no longer part of request URLs. The GET and DELETE operations have been moved
+ to new endpoints (`/lookup` and `/destroy`) which consume the input from
+ the request body rather than the URL.
+ * AppRole requires at least one constraint: previously it was sufficient to
+ turn off all AppRole authentication constraints (secret ID, CIDR block) and
+ use the role ID only. It is now required that at least one additional
+ constraint is enabled. Existing roles are unaffected, but any new roles or
+ updated roles will require this.
+ * Reading wrapped responses from `cubbyhole/response` is deprecated. The
+ `sys/wrapping/unwrap` endpoint should be used instead as it provides
+ additional security, auditing, and other benefits. The ability to read
+ directly will be removed in a future release.
+ * Request Forwarding is now on by default: in 0.6.1 this required toggling on,
+ but is now enabled by default. This can be disabled via the
+ `"disable_clustering"` parameter in Vault's
+ [config](https://www.vaultproject.io/docs/config/index.html), or per-request
+ with the `X-Vault-No-Request-Forwarding` header.
+ * In prior versions a bug caused the `bound_iam_role_arn` value in the
+ `aws-ec2` authentication backend to actually use the instance profile ARN.
+ This has been corrected, but as a result there is a behavior change. To
+ match using the instance profile ARN, a new parameter
+ `bound_iam_instance_profile_arn` has been added. Existing roles will
+ automatically transfer the value over to the correct parameter, but the next
+ time the role is updated, the new meanings will take effect.
+
+FEATURES:
+
+ * **Secret ID CIDR Restrictions in `AppRole`**: Secret IDs generated under an
+ approle can now specify a list of CIDR blocks from which the requests to
+ generate secret IDs must originate. If an approle already has CIDR
+ restrictions specified, the CIDR restrictions on the secret ID should be a
+ subset of those specified on the role [GH-1910]
+ * **Initial Root Token PGP Encryption**: Similar to `generate-root`, the root
+ token created at initialization time can now be PGP encrypted [GH-1883]
+ * **Support Chained Intermediate CAs in `pki`**: The `pki` backend now allows,
+ when a CA cert is being supplied as a signed root or intermediate, a trust
+ chain of arbitrary length. The chain is returned as a parameter at
+ certificate issue/sign time and is retrievable independently as well.
+ [GH-1694]
+ * **Response Wrapping Enhancements**: There are new endpoints to look up
+ response wrapped token parameters; wrap arbitrary values; rotate wrapping
+ tokens; and unwrap with enhanced validation. In addition, list operations
+ can now be response-wrapped. [GH-1927]
+ * Transit features: The `transit` backend now supports generating random bytes
+ and SHA sums; HMACs; and signing and verification functionality using EC
+ keys (P-256 curve)
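
As a rough illustration of the new transit HMAC support described in the last entry (the key name is a placeholder; the default algorithm applies when none is given):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Transit expects base64-encoded input; "my-key" is a placeholder.
	input := base64.StdEncoding.EncodeToString([]byte("hello"))
	secret, err := client.Logical().Write("transit/hmac/my-key", map[string]interface{}{
		"input": input,
	})
	if err != nil {
		log.Fatal(err)
	}
	if secret != nil {
		fmt.Println(secret.Data["hmac"])
	}
}
```
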
+
+IMPROVEMENTS:
+
+ * api: Return error when an invalid (as opposed to incorrect) unseal key is
+ submitted, rather than ignoring it [GH-1782]
+ * api: Add method to call `auth/token/create-orphan` endpoint [GH-1834]
+ * api: Rekey operation now redirects from standbys to master [GH-1862]
+ * audit/file: Sending a `SIGHUP` to Vault now causes Vault to close and
+ re-open the log file, making it easier to rotate audit logs [GH-1953]
+ * auth/aws-ec2: EC2 instances can get authenticated by presenting the identity
+ document and its SHA256 RSA digest [GH-1961]
+ * auth/aws-ec2: IAM bound parameters on the aws-ec2 backend will perform a
+ prefix match instead of exact match [GH-1943]
+ * auth/aws-ec2: Added a new constraint `bound_iam_instance_profile_arn` to
+ refer to IAM instance profile ARN and fixed the earlier `bound_iam_role_arn`
+ to refer to IAM role ARN instead of the instance profile ARN [GH-1913]
+ * auth/aws-ec2: Backend generates the nonce by default and clients can
+ explicitly disable reauthentication by setting an empty nonce [GH-1889]
+ * auth/token: Added warnings if tokens and accessors are used in URLs [GH-1806]
+ * command/format: The `format` flag on select CLI commands takes `yml` as an
+ alias for `yaml` [GH-1899]
+ * core: Allow the size of the read cache to be set via the config file, and
+ change the default value to 1MB (from 32KB) [GH-1784]
+ * core: Allow single and two-character path parameters for most places
+ [GH-1811]
+ * core: Allow list operations to be response-wrapped [GH-1814]
+ * core: Provide better protection against timing attacks in Shamir code
+ [GH-1877]
+ * core: Unmounting/disabling backends no longer returns an error if the mount
+ didn't exist. This is in line with the rest of Vault's API, where `DELETE` is
+ an idempotent operation. [GH-1903]
+ * credential/approle: At least one constraint is required to be enabled while
+ creating and updating a role [GH-1882]
+ * secret/cassandra: Added consistency level for use with roles [GH-1931]
+ * secret/mysql: SQL for revoking user can be configured on the role [GH-1914]
+ * secret/transit: Use HKDF (RFC 5869) as the key derivation function for new
+ keys [GH-1812]
+ * secret/transit: Empty plaintext values are now allowed [GH-1874]
+
+BUG FIXES:
+
+ * audit: Fix panic being caused by some values logging as underlying Go types
+ instead of formatted strings [GH-1912]
+ * auth/approle: Fixed panic on deleting approle that doesn't exist [GH-1920]
+ * auth/approle: Prevent secret IDs and secret ID accessors from being logged
+ in plaintext in audit logs [GH-1947]
+ * auth/aws-ec2: Allow authentication if the underlying host is in a bad state
+ but the instance is running [GH-1884]
+ * auth/token: Fixed metadata missing from the token lookup response by
+ gracefully handling token entry upgrades [GH-1924]
+ * cli: Don't error on newline in token file [GH-1774]
+ * core: Pass back content-type header for forwarded requests [GH-1791]
+ * core: Fix panic if the same key was given twice to `generate-root` [GH-1827]
+ * core: Fix potential deadlock on unmount/remount [GH-1793]
+ * physical/file: Remove empty directories from the `file` storage backend [GH-1821]
+ * physical/zookeeper: Remove empty directories from the `zookeeper` storage
+ backend and add a fix to the `file` storage backend's logic [GH-1964]
+ * secret/aws: Added update operation to `aws/sts` path to consider `ttl`
+ parameter [39b75c6]
+ * secret/aws: Mark STS secrets as non-renewable [GH-1804]
+ * secret/cassandra: Properly store session for re-use [GH-1802]
+ * secret/ssh: Fix panic when revoking SSH dynamic keys [GH-1781]
+
+## 0.6.1 (August 22, 2016)
+
+DEPRECATIONS/CHANGES:
+
+ * Once the active node is 0.6.1, standby nodes must also be 0.6.1 in order to
+ connect to the HA cluster. We recommend following our [general upgrade
+ instructions](https://www.vaultproject.io/docs/install/upgrade.html) in
+ addition to 0.6.1-specific upgrade instructions to ensure that this is not
+ an issue.
+ * Status codes for sealed/uninitialized Vaults have changed to `503`/`501`
+ respectively. See the [version-specific upgrade
+ guide](https://www.vaultproject.io/docs/install/upgrade-to-0.6.1.html) for
+ more details.
+ * Root tokens (tokens with the `root` policy) can no longer be created except
+ by another root token or the `generate-root` endpoint.
+ * Issued certificates from the `pki` backend against new roles created or
+ modified after upgrading will contain a set of default key usages.
+ * The `dynamodb` physical data store no longer supports HA by default. It has
+ some non-ideal behavior around failover that was causing confusion. See the
+ [documentation](https://www.vaultproject.io/docs/config/index.html#ha_enabled)
+ for information on enabling HA mode. It is very important that this
+ configuration is added _before upgrading_.
+ * The `ldap` backend no longer searches for `memberOf` groups as part of its
+ normal flow. Instead, the desired group filter must be specified. This fixes
+ some errors and increases speed for directories with different structures,
+ but if this behavior has been relied upon, ensure that you see the upgrade
+ notes _before upgrading_.
+ * `app-id` is now deprecated with the addition of the new AppRole backend.
+ There are no plans to remove it, but we encourage using AppRole whenever
+ possible, as it offers enhanced functionality and can accommodate many more
+ types of authentication paradigms.
+
+FEATURES:
+
+ * **AppRole Authentication Backend**: The `approle` backend is a
+ machine-oriented authentication backend that provides a similar concept to
+ App-ID while adding many missing features, including a pull model that
+ allows for the backend to generate authentication credentials rather than
+ requiring operators or other systems to push credentials in. It should be
+ useful in many more situations than App-ID. The inclusion of this backend
+ deprecates App-ID. [GH-1426]
+ * **Request Forwarding**: Vault servers can now forward requests to each other
+ rather than redirecting clients. This feature is off by default in 0.6.1 but
+ will be on by default in the next release. See the [HA concepts
+ page](https://www.vaultproject.io/docs/concepts/ha.html) for information on
+ enabling and configuring it. [GH-443]
+ * **Convergent Encryption in `Transit`**: The `transit` backend now supports a
+ convergent encryption mode where the same plaintext will produce the same
+ ciphertext. Although very useful in some situations, this has potential
+ security implications, which are mostly mitigated by requiring the use of
+ key derivation when convergent encryption is enabled. See [the `transit`
+ backend
+ documentation](https://www.vaultproject.io/docs/secrets/transit/index.html)
+ for more details. [GH-1537]
+ * **Improved LDAP Group Filters**: The `ldap` auth backend now uses templates
+ to define group filters, providing the capability to support some
+ directories that could not easily be supported before (especially specific
+ Active Directory setups with nested groups). [GH-1388]
+ * **Key Usage Control in `PKI`**: Issued certificates from roles created or
+ modified after upgrading contain a set of default key usages for increased
+ compatibility with OpenVPN and some other software. This set can be changed
+ when writing a role definition. Existing roles are unaffected. [GH-1552]
+ * **Request Retrying in the CLI and Go API**: Requests that fail with a `5xx`
+ error code will now retry after a backoff. The maximum total number of
+ retries (including disabling this functionality) can be set with an
+ environment variable. See the [environment variable
+ documentation](https://www.vaultproject.io/docs/commands/environment.html)
+ for more details. [GH-1594]
+ * **Service Discovery in `vault init`**: The new `-auto` option on `vault init`
+ will perform service discovery using Consul. When only one node is discovered,
+ it will be initialized and when more than one node is discovered, they will
+ be output for easy selection. See `vault init --help` for more details. [GH-1642]
+ * **MongoDB Secret Backend**: Generate dynamic unique MongoDB database
+ credentials based on configured roles. Sponsored by
+ [CommerceHub](http://www.commercehub.com/). [GH-1414]
+ * **Circonus Metrics Integration**: Vault can now send metrics to
+ [Circonus](http://www.circonus.com/). See the [configuration
+ documentation](https://www.vaultproject.io/docs/config/index.html) for
+ details. [GH-1646]
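
A sketch of the pull-based AppRole login described at the top of this list, assuming the backend is mounted at `auth/approle`; the role and secret IDs are placeholders delivered out of band:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Write("auth/approle/login", map[string]interface{}{
		"role_id":   "my-role-id",   // placeholder
		"secret_id": "my-secret-id", // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	if secret != nil && secret.Auth != nil {
		fmt.Println(secret.Auth.ClientToken)
	}
}
```
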
+
+IMPROVEMENTS:
+
+ * audit: Added a unique identifier to each request which will also be found in
+ the request portion of the response. [GH-1650]
+ * auth/aws-ec2: Added a new constraint `bound_account_id` to the role
+ [GH-1523]
+ * auth/aws-ec2: Added a new constraint `bound_iam_role_arn` to the role
+ [GH-1522]
+ * auth/aws-ec2: Added `ttl` field for the role [GH-1703]
+ * auth/ldap, secret/cassandra, physical/consul: Clients with `tls.Config`
+ have the minimum TLS version set to 1.2 by default. This is configurable.
+ * auth/token: Added endpoint to list accessors [GH-1676]
+ * auth/token: Added `disallowed_policies` option to token store roles [GH-1681]
+ * auth/token: `root` or `sudo` tokens can now create periodic tokens via
+ `auth/token/create`; additionally, the same token can now be periodic and
+ have an explicit max TTL [GH-1725]
+ * build: Add support for building on Solaris/Illumos [GH-1726]
+ * cli: Output formatting in the presence of warnings in the response object
+ [GH-1533]
+ * cli: `vault auth` command supports a `-path` option to take in the path at
+ which the auth backend is enabled, thereby allowing authenticating against
+ different paths using the command options [GH-1532]
+ * cli: `vault auth -methods` will now display the config settings of the mount
+ [GH-1531]
+ * cli: `vault read/write/unwrap -field` now allows selecting token response
+ fields [GH-1567]
+ * cli: `vault write -field` now allows selecting wrapped response fields
+ [GH-1567]
+ * command/status: Version information and cluster details added to the output
+ of `vault status` command [GH-1671]
+ * core: Response wrapping is now enabled for login endpoints [GH-1588]
+ * core: The duration of leadership is now exported via events through
+ telemetry [GH-1625]
+ * core: `sys/capabilities-self` is now accessible as part of the `default`
+ policy [GH-1695]
+ * core: `sys/renew` is now accessible as part of the `default` policy [GH-1701]
+ * core: Unseal keys will now be returned in both hex and base64 forms, and
+ either can be used [GH-1734]
+ * core: Responses from most `/sys` endpoints now return normal `api.Secret`
+ structs in addition to the values they carried before. This means that
+ response wrapping can now be used with most authenticated `/sys` operations
+ [GH-1699]
+ * physical/etcd: Support `ETCD_ADDR` env var for specifying addresses [GH-1576]
+ * physical/consul: Allowing additional tags to be added to Consul service
+ registration via `service_tags` option [GH-1643]
+ * secret/aws: Listing of roles is supported now [GH-1546]
+ * secret/cassandra: Add `connect_timeout` value for Cassandra connection
+ configuration [GH-1581]
+ * secret/mssql,mysql,postgresql: Reading of connection settings is supported
+ in all the sql backends [GH-1515]
+ * secret/mysql: Added optional maximum idle connections value to MySQL
+ connection configuration [GH-1635]
+ * secret/mysql: Use a combination of the role name and token display name in
+ generated user names and allow the length to be controlled [GH-1604]
+ * secret/{cassandra,mssql,mysql,postgresql}: SQL statements can now be passed
+ in via one of four ways: a semicolon-delimited string, a base64-delimited
+ string, a serialized JSON string array, or a base64-encoded serialized JSON
+ string array [GH-1686]
+ * secret/ssh: Added `allowed_roles` to vault-ssh-helper's config and returning
+ role name as part of response of `verify` API
+ * secret/ssh: Added passthrough of command line arguments to `ssh` [GH-1680]
+ * sys/health: Added version information to the response of health status
+ endpoint [GH-1647]
+ * sys/health: Cluster information is returned as part of health status when
+ Vault is unsealed [GH-1671]
+ * sys/mounts: MountTable data is compressed before serializing to accommodate
+ thousands of mounts [GH-1693]
+ * website: The [token
+ concepts](https://www.vaultproject.io/docs/concepts/tokens.html) page has
+ been completely rewritten [GH-1725]
+
+BUG FIXES:
+
+ * auth/aws-ec2: Added a nil check for stored whitelist identity object
+ during renewal [GH-1542]
+ * auth/cert: Fix panic if no client certificate is supplied [GH-1637]
+ * auth/token: Don't report that a non-expiring root token is renewable, as
+ attempting to renew it results in an error [GH-1692]
+ * cli: Don't retry a command when a redirection is received [GH-1724]
+ * core: Fix regression causing status codes to be `400` in most non-5xx error
+ cases [GH-1553]
+ * core: Fix panic that could occur during a leadership transition [GH-1627]
+ * physical/postgres: Remove use of prepared statements as this causes
+ connection multiplexing software to break [GH-1548]
+ * physical/consul: Multiple Vault nodes on the same machine leading to check ID
+ collisions were resulting in incorrect health check responses [GH-1628]
+ * physical/consul: Fix deregistration of health checks on exit [GH-1678]
+ * secret/postgresql: Check for existence of role before attempting deletion
+ [GH-1575]
+ * secret/postgresql: Handle revoking roles that have privileges on sequences
+ [GH-1573]
+ * secret/postgresql(,mysql,mssql): Fix incorrect use of database over
+ transaction object which could lead to connection exhaustion [GH-1572]
+ * secret/pki: Fix parsing CA bundle containing trailing whitespace [GH-1634]
+ * secret/pki: Fix adding email addresses as SANs [GH-1688]
+ * secret/pki: Ensure that CRL values are always UTC, per RFC [GH-1727]
+ * sys/seal-status: Fixed nil Cluster object while checking seal status [GH-1715]
+
+## 0.6.0 (June 14th, 2016)
+
+SECURITY:
+
+ * Although `sys/revoke-prefix` was intended to revoke prefixes of secrets (via
+ lease IDs, which incorporate path information) and
+ `auth/token/revoke-prefix` was intended to revoke prefixes of tokens (using
+ the tokens' paths and, since 0.5.2, role information), in implementation
+ they both behaved exactly the same way since a single component in Vault is
+ responsible for managing lifetimes of both, and the type of the tracked
+ lifetime was not being checked. The end result was that either endpoint
+ could revoke both secret leases and tokens. We consider this a very minor
+ security issue as there are a number of mitigating factors: both endpoints
+ require `sudo` capability in addition to write capability, preventing
+ blanket ACL path globs from providing access; both work by using the prefix
+ to revoke as a part of the endpoint path, allowing them to be properly
+ ACL'd; and both are intended for emergency scenarios and users should
+ already not generally have access to either one. In order to prevent
+ confusion, we have simply removed `auth/token/revoke-prefix` in 0.6, and
+ `sys/revoke-prefix` will be meant for both leases and tokens instead.
+
+DEPRECATIONS/CHANGES:
+
+ * `auth/token/revoke-prefix` has been removed. See the security notice for
+ details. [GH-1280]
+ * Vault will now automatically register itself as the `vault` service when
+ using the `consul` backend and will perform its own health checks. See
+ the Consul backend documentation for information on how to disable
+ auto-registration and service checks.
+ * List operations that do not find any keys now return a `404` status code
+ rather than an empty response object [GH-1365]
+ * CA certificates issued from the `pki` backend no longer have associated
+ leases, and any CA certs already issued will ignore revocation requests from
+ the lease manager. This is to prevent CA certificates from being revoked
+ when the token used to issue the certificate expires; it was not obvious
+ to users that the token lifetime needed to be at
+ least as long as a potentially very long-lived CA cert.
+
+FEATURES:
+
+ * **AWS EC2 Auth Backend**: Provides a secure introduction mechanism for AWS
+ EC2 instances allowing automated retrieval of Vault tokens. Unlike most
+ Vault authentication backends, this backend does not require first deploying
+ or provisioning security-sensitive credentials (tokens, username/password,
+ client certificates, etc). Instead, it treats AWS as a Trusted Third Party
+ and uses the cryptographically signed dynamic metadata information that
+ uniquely represents each EC2 instance. [Vault
+ Enterprise](https://www.hashicorp.com/vault.html) customers have access to a
+ turnkey client that speaks the backend API and makes access to a Vault token
+ easy.
+ * **Response Wrapping**: Nearly any response within Vault can now be wrapped
+ inside a single-use, time-limited token's cubbyhole, taking the [Cubbyhole
+ Authentication
+ Principles](https://www.hashicorp.com/blog/vault-cubbyhole-principles.html)
+ mechanism to its logical conclusion. Retrieving the original response is as
+ simple as a single API command or the new `vault unwrap` command. This makes
+ secret distribution easier and more secure, including secure introduction.
+ * **Azure Physical Backend**: You can now use Azure blob object storage as
+ your Vault physical data store [GH-1266]
+ * **Swift Physical Backend**: You can now use Swift blob object storage as
+ your Vault physical data store [GH-1425]
+ * **Consul Backend Health Checks**: The Consul backend will automatically
+ register a `vault` service and perform its own health checking. By default
+ the active node can be found at `active.vault.service.consul` and standby
+ nodes at `standby.vault.service.consul`. Sealed vaults are marked
+ critical and are not listed by default in Consul's service discovery. See
+ the documentation for details. [GH-1349]
+ * **Explicit Maximum Token TTLs**: You can now set explicit maximum TTLs on
+ tokens that do not honor changes in the system- or mount-set values. This is
+ useful, for instance, when the max TTL of the system or the `auth/token`
+ mount must be set high to accommodate certain needs but you want more
+ granular restrictions on tokens being issued directly from the Token
+ authentication backend at `auth/token`; a sketch follows this list.
+ [GH-1399]
+ * **Non-Renewable Tokens**: When creating tokens directly through the token
+ authentication backend, you can now specify in both token store roles and
+ the API whether or not a token should be renewable, defaulting to `true`.
+ * **RabbitMQ Secret Backend**: Vault can now generate credentials for
+ RabbitMQ. Vhosts and tags can be defined within roles. [GH-788]
+
+IMPROVEMENTS:
+
+ * audit: Add the DisplayName value to the copy of the Request object embedded
+ in the associated Response, to match the original Request object [GH-1387]
+ * audit: Enable auditing of the `seal` and `step-down` commands [GH-1435]
+ * backends: Remove most `root`/`sudo` paths in favor of normal ACL mechanisms.
+ Particular exceptions are the current MFA paths. A few paths in `token` and
+ `sys` also require `root` or `sudo`. [GH-1478]
+ * command/auth: Restore the previous authenticated token if the `auth` command
+ fails to authenticate the provided token [GH-1233]
+ * command/write: `-format` and `-field` can now be used with the `write`
+ command [GH-1228]
+ * core: Add `mlock` support for FreeBSD, OpenBSD, and Darwin [GH-1297]
+ * core: Don't keep lease timers around when tokens are revoked [GH-1277]
+ * core: If using the `disable_cache` option, caches for the policy store and
+ the `transit` backend are now disabled as well [GH-1346]
+ * credential/cert: Renewal requests are rejected if the set of policies has
+ changed since the token was issued [GH-477]
+ * credential/cert: Check CRLs for specific non-CA certs configured in the
+ backend [GH-1404]
+ * credential/ldap: If `groupdn` is not configured, skip searching LDAP and
+ only return policies for local groups, plus a warning [GH-1283]
+ * credential/ldap: `vault list` support for users and groups [GH-1270]
+ * credential/ldap: Support for the `memberOf` attribute for group membership
+ searching [GH-1245]
+ * credential/userpass: Add list support for users [GH-911]
+ * credential/userpass: Remove user configuration paths from requiring sudo, in
+ favor of normal ACL mechanisms [GH-1312]
+ * credential/token: Sanitize policies and add `default` policies in appropriate
+ places [GH-1235]
+ * credential/token: Setting the renewable status of a token is now possible
+ via `vault token-create` and the API. The default is true, but tokens can be
+ specified as non-renewable. [GH-1499]
+ * secret/aws: Use chain credentials to allow environment/EC2 instance/shared
+ providers [GH-307]
+ * secret/aws: Support for STS AssumeRole functionality [GH-1318]
+ * secret/consul: Reading the Consul access configuration is now supported;
+ the response contains only non-sensitive information [GH-1445]
+ * secret/pki: Added `exclude_cn_from_sans` field to prevent adding the CN to
+ DNS or Email Subject Alternate Names [GH-1220]
+ * secret/pki: Added list support for certificates [GH-1466]
+ * sys/capabilities: Enforce ACL checks for requests that query the capabilities
+ of a token on a given path [GH-1221]
+ * sys/health: Status information can now be retrieved with `HEAD` requests
+ (see the sketch below) [GH-1509]
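+
+Because `sys/health` encodes status in its HTTP response code, a `HEAD` request
+is enough for load-balancer probes (a minimal sketch):
+
+```sh
+# curl -I issues a HEAD request; the status code alone conveys health
+$ curl -I $VAULT_ADDR/v1/sys/health
+```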
+
+BUG FIXES:
+
+ * command/read: Fix panic when using `-field` with a non-string value [GH-1308]
+ * command/token-lookup: Fix TTL showing as 0 depending on how a token was
+ created. This only affected the value shown at lookup, not the token
+ behavior itself. [GH-1306]
+ * command/various: Tell the JSON decoder to not convert all numbers to floats;
+ fixes various places where numbers showed up in scientific notation
+ * command/server: Prioritized `devRootTokenID` and `devListenAddress` flags
+ over their respective env vars [GH-1480]
+ * command/ssh: Provided option to disable host key checking. The automated
+ variant of the `vault ssh` command uses `sshpass`, which failed to handle
+ the host key check presented by the `ssh` binary. [GH-1473]
+ * core: Properly persist mount-tuned TTLs for auth backends [GH-1371]
+ * core: Don't accidentally crosswire SIGINT to the reload handler [GH-1372]
+ * credential/github: Make organization comparison case-insensitive during
+ login [GH-1359]
+ * credential/github: Fix panic when renewing a token created with some earlier
+ versions of Vault [GH-1510]
+ * credential/github: The token used to log in via `vault auth` can now be
+ specified in the `VAULT_AUTH_GITHUB_TOKEN` environment variable [GH-1511]
+ * credential/ldap: Fix problem where certain error conditions when configuring
+ or opening LDAP connections would cause a panic instead of returning a useful
+ error message [GH-1262]
+ * credential/token: Fall back to normal parent-token semantics if
+ `allowed_policies` is empty for a role. Using `allowed_policies` of
+ `default` resulted in the same behavior anyway. [GH-1276]
+ * credential/token: Fix issues renewing tokens when using the "suffix"
+ capability of token roles [GH-1331]
+ * credential/token: Fix lookup via POST showing the request token instead of
+ the desired token [GH-1354]
+ * credential/various: Fix renewal conditions when `default` policy is not
+ contained in the backend config [GH-1256]
+ * physical/s3: Don't panic in certain error cases from bad S3 responses [GH-1353]
+ * secret/consul: Use non-pooled Consul API client to avoid leaving files open
+ [GH-1428]
+ * secret/pki: Don't check whether a certificate is destined to be a CA
+ certificate if the `sign-verbatim` endpoint is used [GH-1250]
+
+## 0.5.3 (May 27th, 2016)
+
+SECURITY:
+
+ * Consul ACL Token Revocation: An issue was reported to us indicating that
+ generated Consul ACL tokens were not being properly revoked. Upon
+ investigation, we found that this behavior was reproducible in a specific
+ scenario: when a generated lease for a Consul ACL token had been renewed
+ prior to revocation. In this case, the generated token was not being
+ properly persisted internally through the renewal function, leading to an
+ error during revocation due to the missing token. Unfortunately, this was
+ coded as a user error rather than an internal error, and the revocation
+ logic was expecting internal errors if revocation failed. As a result, the
+ revocation logic believed the revocation to have succeeded when it in fact
+ failed, causing the lease to be dropped while the token was still valid
+ within Consul. In this release, the Consul backend properly persists the
+ token through renewals, and the revocation logic has been changed to
+ consider any error type to have been a failure to revoke, causing the lease
+ to persist and attempt to be revoked later.
+
+We have written an example shell script that searches through Consul's ACL
+tokens and looks for those generated by Vault, which can be used as a template
+for a revocation script as deemed necessary for any particular security
+response. The script is available at
+https://gist.github.com/jefferai/6233c2963f9407a858d84f9c27d725c0
+
+Please note that any outstanding leases for Consul tokens produced prior to
+0.5.3 that have been renewed will continue to exhibit this behavior. As a
+result, we recommend either revoking all tokens produced by the backend and
+issuing new ones, or if needed, a more advanced variant of the provided example
+could use the timestamp embedded in each generated token's name to decide which
+tokens are too old and should be deleted. This could then be run periodically
+up until the maximum lease time for any outstanding pre-0.5.3 tokens has
+expired.
+
+This is a security-only release. There are no other code changes since 0.5.2.
+The binaries have one additional change: they are built against Go 1.6.1 rather
+than Go 1.6, as Go 1.6.1 contains two security fixes to the Go programming
+language itself.
+
+## 0.5.2 (March 16th, 2016)
+
+FEATURES:
+
+ * **MSSQL Backend**: Generate dynamic unique MSSQL database credentials based
+ on configured roles [GH-998]
+ * **Token Accessors**: Vault now provides an accessor with each issued token.
+ This accessor is an identifier that can be used for a limited set of
+ actions, notably for token revocation. This value can be logged in
+ plaintext to audit logs, and in combination with the plaintext metadata
+ logged to audit logs, provides a searchable and straightforward way to
+ revoke particular users' or services' tokens in many cases. To enable
+ plaintext audit logging of these accessors, set `hmac_accessor=false` when
+ enabling an audit backend.
+ * **Token Credential Backend Roles**: Roles can now be created in the `token`
+ credential backend that allow modifying token behavior in ways that are not
+ otherwise exposed or easily delegated. This allows creating tokens with a
+ fixed set (or subset) of policies (rather than a subset of the calling
+ token's), periodic tokens with a fixed TTL but no expiration, specified
+ prefixes, and orphans; a sketch follows this list.
+ * **Listener Certificate Reloading**: Vault's configured listeners now reload
+ their TLS certificate and private key when the Vault process receives a
+ SIGHUP.
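+
+A sketch of a token store role as described above (the `allowed_policies`,
+`period`, and `orphan` parameter names are assumptions; consult the token
+backend documentation):
+
+```sh
+# A role issuing periodic, orphan tokens restricted to a fixed policy set
+$ vault write auth/token/roles/example \
+    allowed_policies="app" \
+    period=86400 \
+    orphan=true
+
+# Create a token against the role
+$ vault write auth/token/create/example
+```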
+
+IMPROVEMENTS:
+
+ * auth/token: Endpoints optionally accept tokens from the HTTP body rather
+ than just from the URLs [GH-1211]
+ * auth/token,sys/capabilities: Added new endpoints
+ `auth/token/lookup-accessor`, `auth/token/revoke-accessor` and
+ `sys/capabilities-accessor`, which enables performing the respective actions
+ with just the accessor of the tokens, without having access to the actual
+ token (see the sketch after this list) [GH-1188]
+ * core: Ignore leading `/` in policy paths [GH-1170]
+ * core: Ignore leading `/` in mount paths [GH-1172]
+ * command/policy-write: Provided HCL is now validated for format violations
+ and helpful information is provided about where the violation occurred
+ [GH-1200]
+ * command/server: The initial root token ID when running in `-dev` mode can
+ now be specified via `-dev-root-token-id` or the environment variable
+ `VAULT_DEV_ROOT_TOKEN_ID` [GH-1162]
+ * command/server: The listen address when running in `-dev` mode can now be
+ specified via `-dev-listen-address` or the environment variable
+ `VAULT_DEV_LISTEN_ADDRESS` [GH-1169]
+ * command/server: The configured listeners now reload their TLS
+ certificates/keys when Vault is SIGHUP'd [GH-1196]
+ * command/step-down: New `vault step-down` command and API endpoint to force
+ the targeted node to give up active status, but without sealing. The node
+ will wait ten seconds before attempting to grab the lock again. [GH-1146]
+ * command/token-renew: Allow no token to be passed in; use `renew-self` in
+ this case. Change the behavior for any token being passed in to use `renew`.
+ [GH-1150]
+ * credential/app-id: Allow `app-id` parameter to be given in the login path;
+ this causes the `app-id` to be part of the token path, making it easier to
+ use with `revoke-prefix` [GH-424]
+ * credential/cert: Non-CA certificates can be used for authentication. They
+ must be matched exactly (issuer and serial number), and
+ the certificate must carry the client authentication or 'any' extended usage
+ attributes. [GH-1153]
+ * credential/cert: Subject and Authority key IDs are output in metadata; this
+ allows more flexible searching/revocation in the audit logs [GH-1183]
+ * credential/cert: Support listing configured certs [GH-1212]
+ * credential/userpass: Add support for `create`/`update` capability
+ distinction in user path, and add user-specific endpoints to allow changing
+ the password and policies [GH-1216]
+ * credential/token: Add roles [GH-1155]
+ * secret/mssql: Add MSSQL backend [GH-998]
+ * secret/pki: Add revocation time (zero or Unix epoch) to `pki/cert/SERIAL`
+ endpoint [GH-1180]
+ * secret/pki: Sanitize serial number in `pki/revoke` endpoint to allow some
+ other formats [GH-1187]
+ * secret/ssh: Added documentation for `ssh/config/zeroaddress` endpoint.
+ [GH-1154]
+ * sys: Added new endpoints `sys/capabilities` and `sys/capabilities-self` to
+ fetch the capabilities of a token on a given path [GH-1171]
+ * sys: Added `sys/revoke-force`, which enables a user to ignore backend errors
+ when revoking a lease, necessary in some emergency/failure scenarios
+ [GH-1168]
+ * sys: The return codes from `sys/health` can now be user-specified via query
+ parameters (see the second sketch below) [GH-1199]
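+
+A sketch of acting on a token through its accessor alone (the `accessor`
+parameter name is an assumption based on the endpoint names above):
+
+```sh
+# Inspect a token's properties without holding the token itself
+$ vault write auth/token/lookup-accessor accessor=<accessor>
+
+# Revoke the token behind the accessor
+$ vault write auth/token/revoke-accessor accessor=<accessor>
+```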
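+
+And a sketch of overriding the `sys/health` return codes (the `standbycode`
+and `sealedcode` query parameter names are assumptions):
+
+```sh
+# Report 200 for standby nodes and 503 when sealed
+$ curl "$VAULT_ADDR/v1/sys/health?standbycode=200&sealedcode=503"
+```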
+
+BUG FIXES:
+
+ * logical/cassandra: Apply hyphen/underscore replacement to the entire
+ generated username, not just the UUID, in order to handle token display name
+ hyphens [GH-1140]
+ * physical/etcd: Output actual error when cluster sync fails [GH-1141]
+ * vault/expiration: Don't ignore error responses from the backends during
+ renewals [GH-1176]
+
+## 0.5.1 (February 25th, 2016)
+
+DEPRECATIONS/CHANGES:
+
+ * RSA keys less than 2048 bits are no longer supported in the PKI backend.
+ 1024-bit keys are considered unsafe and are disallowed in the Internet PKI.
+ The `pki` backend has enforced SHA256 hashes in signatures from the
+ beginning, and software that can handle these hashes should be able to
+ handle larger key sizes. [GH-1095]
+ * The PKI backend now does not automatically delete expired certificates,
+ including from the CRL. Doing so could lead to a situation where a time
+ mismatch between the Vault server and clients could result in a certificate
+ that would not be considered expired by a client being removed from the CRL.
+ The new `pki/tidy` endpoint can be used to trigger expirations. [GH-1129]
+ * The `cert` backend now performs a variant of channel binding at renewal time
+ for increased security. In order to not overly burden clients, a notion of
+ identity is used. This functionality can be disabled. See the 0.5.1 upgrade
+ guide for more specific information [GH-1127]
+
+FEATURES:
+
+ * **Codebase Audit**: Vault's 0.5 codebase was audited by iSEC. (The terms of
+ the audit contract do not allow us to make the results public.) [GH-220]
+
+IMPROVEMENTS:
+
+ * api: The `VAULT_TLS_SERVER_NAME` environment variable can be used to control
+ the SNI header during TLS connections (see the sketch after this list)
+ [GH-1131]
+ * api/health: Add the server's time in UTC to health responses [GH-1117]
+ * command/rekey and command/generate-root: These now return the status at
+ attempt initialization time, rather than requiring a separate fetch for the
+ nonce [GH-1054]
+ * credential/cert: Don't require root/sudo tokens for the `certs/` and `crls/`
+ paths; use normal ACL behavior instead [GH-468]
+ * credential/github: The validity of the token used for login will be checked
+ at renewal time [GH-1047]
+ * credential/github: The `config` endpoint no longer requires a root token;
+ normal ACL path matching applies
+ * deps: Use the standardized Go 1.6 vendoring system
+ * secret/aws: Inform users of AWS-imposed policy restrictions around STS
+ tokens if they attempt to use an invalid policy [GH-1113]
+ * secret/mysql: The MySQL backend now allows disabling verification of the
+ `connection_url` [GH-1096]
+ * secret/pki: Submitted CSRs are now verified to have the correct key type and
+ minimum number of bits according to the role. The exception is intermediate
+ CA signing and the `sign-verbatim` path [GH-1104]
+ * secret/pki: New `tidy` endpoint to allow expunging expired certificates.
+ [GH-1129]
+ * secret/postgresql: The PostgreSQL backend now allows disabling verification
+ of the `connection_url` [GH-1096]
+ * secret/ssh: When verifying an OTP, return 400 if it is not valid instead of
+ 204 [GH-1086]
+ * credential/app-id: App ID backend will check the validity of app-id and
+ user-id at renewal time [GH-1039]
+ * credential/cert: TLS Certificates backend, during renewal, will now match the
+ client identity with the client identity used during login [GH-1127]
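+
+A minimal sketch of the SNI override mentioned above:
+
+```sh
+# Present "vault.example.com" as the TLS server name regardless of the address dialed
+$ VAULT_TLS_SERVER_NAME=vault.example.com vault status
+```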
+
+BUG FIXES:
+
+ * credential/ldap: Properly escape values being provided to search filters
+ [GH-1100]
+ * secret/aws: Cap the length of usernames for both IAM and STS types
+ [GH-1102]
+ * secret/pki: If a cert is not found during lookup of a serial number,
+ respond with a 400 rather than a 500 [GH-1085]
+ * secret/postgresql: Add extra revocation statements to better handle more
+ permission scenarios [GH-1053]
+ * secret/postgresql: Make `connection_url` work properly [GH-1112]
+
+## 0.5.0 (February 10, 2016)
+
+SECURITY:
+
+ * Previous versions of Vault could allow a malicious user to hijack the rekey
+ operation by canceling an operation in progress and starting a new one. The
+ practical application of this is very small. If the user was an unseal key
+ owner, they could attempt to do this in order to either receive the new
+ unseal keys unencrypted or to replace the PGP keys used for encryption with
+ ones under their control. However, since this would invalidate any rekey
+ progress, they
+ would need other unseal key holders to resubmit, which would be rather
+ suspicious during this manual operation if they were not also the original
+ initiator of the rekey attempt. If the user was not an unseal key holder,
+ there is no benefit to be gained; the only outcome that could be attempted
+ would be a denial of service against a legitimate rekey operation by sending
+ cancel requests over and over. Thanks to Josh Snyder for the report!
+
+DEPRECATIONS/CHANGES:
+
+ * `s3` physical backend: Environment variables are now preferred over
+ configuration values. This makes it behave similarly to the rest of Vault,
+ which, in increasing order of preference, uses values from the configuration
+ file, environment variables, and CLI flags. [GH-871]
+ * `etcd` physical backend: `sync` functionality is now supported and turned on
+ by default. This can be disabled. [GH-921]
+ * `transit`: If a client attempts to encrypt a value with a key that does not
+ yet exist, what happens now depends on the capabilities set in the client's
+ ACL policies. If the client has `create` (or `create` and `update`)
+ capability, the key will upsert as in the past. If the client has `update`
+ capability, they will receive an error (see the sketch after this list).
+ [GH-1012]
+ * `token-renew` CLI command: If the token given for renewal is the same as the
+ client token, the `renew-self` endpoint will be used in the API. Given that
+ the `default` policy (by default) allows all clients access to the
+ `renew-self` endpoint, this makes it much more likely that the intended
+ operation will be successful. [GH-894]
+ * Token `lookup`: the `ttl` value in the response now reflects the actual
+ remaining TTL rather than the original TTL specified when the token was
+ created; this value is now located in `creation_ttl` [GH-986]
+ * Vault no longer uses grace periods on leases or token TTLs. Uncertainty
+ about the length of the grace period for any given backend caused
+ confusion. [GH-1002]
+ * `rekey`: Rekey now requires a nonce to be supplied with key shares. This
+ nonce is generated at the start of a rekey attempt and is unique for that
+ attempt.
+ * `status`: The exit code for the `status` CLI command is now `2` for an
+ uninitialized Vault instead of `1`. `1` is returned for errors. This better
+ matches the rest of the CLI.
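+
+A sketch of the `transit` upsert behavior described above (`my-key` is
+illustrative; `plaintext` must be base64-encoded):
+
+```sh
+# With create (or create plus update) capability on the path, a missing key is
+# upserted; with only update capability, the same call now returns an error
+$ vault write transit/encrypt/my-key plaintext=$(echo -n "secret" | base64)
+```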
+
+FEATURES:
+
+ * **Split Data/High Availability Physical Backends**: You can now configure
+ two separate physical backends: one to be used for High Availability
+ coordination and another to be used for encrypted data storage. See the
+ [configuration
+ documentation](https://vaultproject.io/docs/config/index.html) for details.
+ [GH-395]
+ * **Fine-Grained Access Control**: Policies can now use the `capabilities` set
+ to specify fine-grained control over operations allowed on a path, including
+ separation of `sudo` privileges from other privileges. These can be mixed
+ and matched in any way desired. The `policy` value is kept for backwards
+ compatibility. See the [updated policy
+ documentation](https://vaultproject.io/docs/concepts/policies.html) for
+ details; a policy sketch follows this list. [GH-914]
+ * **List Support**: Listing is now supported via the API and the new `vault
+ list` command. This currently supports listing keys in the `generic` and
+ `cubbyhole` backends and a few other places (noted in the IMPROVEMENTS
+ section below). Different parts of the API and backends will need to
+ implement list capabilities in ways that make sense to particular endpoints,
+ so further support will appear over time. [GH-617]
+ * **Root Token Generation via Unseal Keys**: You can now use the
+ `generate-root` CLI command to generate new orphaned, non-expiring root
+ tokens in case the original is lost or revoked (accidentally or
+ purposefully). This requires a quorum of unseal key holders. The output
+ value is protected via any PGP key of the initiator's choosing or a one-time
+ pad known only to the initiator (a suitable pad can be generated via the
+ `-genotp` flag to the command). [GH-915]
+ * **Unseal Key Archiving**: You can now optionally have Vault store your
+ unseal keys in your chosen physical store for disaster recovery purposes.
+ This option is only available when the keys are encrypted with PGP. [GH-907]
+ * **Keybase Support for PGP Encryption Keys**: You can now specify Keybase
+ users when passing in PGP keys to the `init`, `rekey`, and `generate-root`
+ CLI commands. Public keys for these users will be fetched automatically.
+ [GH-901]
+ * **DynamoDB HA Physical Backend**: There is now a new, community-supported
+ HA-enabled physical backend using Amazon DynamoDB. See the [configuration
+ documentation](https://vaultproject.io/docs/config/index.html) for details.
+ [GH-878]
+ * **PostgreSQL Physical Backend**: There is now a new, community-supported
+ physical backend using PostgreSQL. See the [configuration
+ documentation](https://vaultproject.io/docs/config/index.html) for details.
+ [GH-945]
+ * **STS Support in AWS Secret Backend**: You can now use the AWS secret
+ backend to fetch STS tokens rather than IAM users. [GH-927]
+ * **Speedups in the transit backend**: The `transit` backend has gained a
+ cache, and now loads only the working set of keys (e.g. from the
+ `min_decryption_version` to the current key version) into memory.
+ This provides large speedups and potential memory savings when the `rotate`
+ feature of the backend is used heavily.
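+
+A sketch of the new fine-grained policy syntax (the path and policy name are
+illustrative):
+
+```sh
+$ cat > deploy.hcl <<'EOF'
+# Allow reading and listing, and nothing else, under secret/deploy/
+path "secret/deploy/*" {
+  capabilities = ["read", "list"]
+}
+EOF
+$ vault policy-write deploy deploy.hcl
+```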
+
+IMPROVEMENTS:
+
+ * cli: Output secrets sorted by key name [GH-830]
+ * cli: Support YAML as an output format [GH-832]
+ * cli: Show an error if the output format is incorrect, rather than falling
+ back to an empty table [GH-849]
+ * cli: Allow setting the `advertise_addr` for HA via the
+ `VAULT_ADVERTISE_ADDR` environment variable [GH-581]
+ * cli/generate-root: Add generate-root and associated functionality [GH-915]
+ * cli/init: Add `-check` flag that returns whether Vault is initialized
+ [GH-949]
+ * cli/server: Use internal functions for the token-helper rather than shelling
+ out, which fixes some problems with using a static binary in Docker or paths
+ with multiple spaces when launching in `-dev` mode [GH-850]
+ * cli/token-lookup: Add token-lookup command [GH-892]
+ * command/{init,rekey}: Allow ASCII-armored keychain files to be arguments for
+ `-pgp-keys` [GH-940]
+ * conf: Use normal bool values rather than empty/non-empty for the
+ `tls_disable` option [GH-802]
+ * credential/ldap: Add support for binding, both anonymously (to discover a
+ user DN) and via a username and password [GH-975]
+ * credential/token: Add `last_renewal_time` to token lookup calls [GH-896]
+ * credential/token: Change `ttl` to reflect the current remaining TTL; the
+ original value is in `creation_ttl` [GH-1007]
+ * helper/certutil: Add ability to parse PKCS#8 bundles [GH-829]
+ * logical/aws: You can now get STS tokens instead of IAM users [GH-927]
+ * logical/cassandra: Add `protocol_version` parameter to set the CQL proto
+ version [GH-1005]
+ * logical/cubbyhole: Add cubbyhole access to default policy [GH-936]
+ * logical/mysql: Add list support for roles path [GH-984]
+ * logical/pki: Fix up key usages being specified for CAs [GH-989]
+ * logical/pki: Add list support for roles path [GH-985]
+ * logical/pki: Allow `pem_bundle` to be specified as the format, which
+ provides a concatenated PEM bundle of returned values (see the sketch after
+ this list) [GH-1008]
+ * logical/pki: Add 30 seconds of slack to the validity start period to
+ accommodate some clock skew in machines [GH-1036]
+ * logical/postgres: Add `max_idle_connections` parameter [GH-950]
+ * logical/postgres: Add list support for roles path
+ * logical/ssh: Add list support for roles path [GH-983]
+ * logical/transit: Keys are archived and only keys between the latest version
+ and `min_decryption_version` are loaded into the working set. This can
+ provide a very large speed increase when rotating keys very often. [GH-977]
+ * logical/transit: Keys are now cached, which should provide a large speedup
+ in most cases [GH-979]
+ * physical/cache: Use 2Q cache instead of straight LRU [GH-908]
+ * physical/etcd: Support basic auth [GH-859]
+ * physical/etcd: Support sync functionality and enable by default [GH-921]
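+
+A sketch of requesting the concatenated PEM bundle from the `pki` backend (the
+role and common name are illustrative):
+
+```sh
+$ vault write pki/issue/example \
+    common_name=app01.example.com \
+    format=pem_bundle
+```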
+
+BUG FIXES:
+
+ * api: Correct the HTTP verb used in the LookupSelf method [GH-887]
+ * api: Fix the output of `Sys().MountConfig(...)` to return proper values
+ [GH-1017]
+ * command/read: Fix panic when an empty argument was given [GH-923]
+ * command/ssh: Fix panic when username lookup fails [GH-886]
+ * core: When running in standalone mode, don't advertise that we are active
+ until post-unseal setup completes [GH-872]
+ * core: Update go-cleanhttp dependency to ensure idle connections aren't
+ leaked [GH-867]
+ * core: Don't allow tokens to have duplicate policies [GH-897]
+ * core: Fix regression in `sys/renew` that caused information stored in the
+ Secret part of the response to be lost [GH-912]
+ * physical: Use square brackets when setting an IPv6-based advertise address
+ as the auto-detected advertise address [GH-883]
+ * physical/s3: Use an initialized client when using IAM roles to fix a
+ regression introduced against newer versions of the AWS Go SDK [GH-836]
+ * secret/pki: Fix a condition where unmounting could fail if the CA
+ certificate was not properly loaded [GH-946]
+ * secret/ssh: Fix a problem where SSH connections were not always closed
+ properly [GH-942]
+
+MISC:
+
+ * Clarified our stance on support for community-derived physical backends.
+ See the [configuration
+ documentation](https://vaultproject.io/docs/config/index.html) for details.
+ * Add `vault-java` to libraries [GH-851]
+ * Various minor documentation fixes and improvements [GH-839] [GH-854]
+ [GH-861] [GH-876] [GH-899] [GH-900] [GH-904] [GH-923] [GH-924] [GH-958]
+ [GH-959] [GH-981] [GH-990] [GH-1024] [GH-1025]
+
+BUILD NOTE:
+
+ * The HashiCorp-provided binary release of Vault 0.5.0 is built against a
+ patched version of Go 1.5.3 containing two specific bug fixes affecting TLS
+ certificate handling. These fixes are in the Go 1.6 tree and were
+ cherry-picked on top of stock Go 1.5.3. If you want to examine the way in
+ which the releases were built, please look at our [cross-compilation
+ Dockerfile](https://github.com/hashicorp/vault/blob/v0.5.0/scripts/cross/Dockerfile-patched-1.5.3).
+
+## 0.4.1 (January 13, 2016)
+
+SECURITY:
+
+ * Build against Go 1.5.3 to mitigate a security vulnerability introduced in
+ Go 1.5. For more information, please see
+ https://groups.google.com/forum/#!topic/golang-dev/MEATuOi_ei4
+
+This is a security-only release; other than the version number and building
+against Go 1.5.3, there are no changes from 0.4.0.
+
+## 0.4.0 (December 10, 2015)
+
+DEPRECATIONS/CHANGES:
+
+ * Policy Name Casing: Policy names are now normalized to lower-case on write,
+ helping prevent accidental case mismatches. For backwards compatibility,
+ policy names are not currently normalized when reading or deleting. [GH-676]
+ * Default etcd port number: the default connection string for the `etcd`
+ physical store uses port 2379, the port used by the supported 2.x versions
+ of etcd, instead of port 4001. [GH-753]
+ * As noted below in the FEATURES section, if your Vault installation contains
+ a policy called `default`, new tokens created will inherit this policy
+ automatically.
+ * In the PKI backend there have been a few minor breaking changes:
+ * The token display name is no longer a valid option for providing a base
+ domain for issuance. Since this name is prepended with the name of the
+ authentication backend that issued it, it provided a faulty use-case at best
+ and a confusing experience at worst. We hope to figure out a better
+ per-token value in a future release.
+ * The `allowed_base_domain` parameter has been changed to `allowed_domains`,
+ which accepts a comma-separated list of domains. This allows issuing
+ certificates with DNS subjects across multiple domains. If you had a
+ configured `allowed_base_domain` parameter, it will be migrated
+ automatically when the role is read (either via a normal read, or via
+ issuing a certificate); a sketch follows this list.
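+
+A sketch of the new multi-domain role configuration (the role name is
+illustrative, and `allow_subdomains` is an assumption added for a realistic
+role):
+
+```sh
+$ vault write pki/roles/example \
+    allowed_domains="example.com,example.net" \
+    allow_subdomains=true
+```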
+
+FEATURES:
+
+ * **Significantly Enhanced PKI Backend**: The `pki` backend can now generate
+ and sign root CA certificates and intermediate CA CSRs. It can also now sign
+ submitted client CSRs, and it includes a significant number of other
+ enhancements. See the updated documentation for the full API. [GH-666]
+ * **CRL Checking for Certificate Authentication**: The `cert` backend now
+ supports pushing CRLs into the mount and using the contained serial numbers
+ for revocation checking. See the documentation for the `cert` backend for
+ more info. [GH-330]
+ * **Default Policy**: Vault now ensures that a policy named `default` is added
+ to every token. This policy cannot be deleted, but it can be modified
+ (including to an empty policy). There are three endpoints allowed in the
+ default `default` policy, related to token self-management: `lookup-self`,
+ which allows a token to retrieve its own information, and `revoke-self` and
+ `renew-self`, which are self-explanatory. If your existing Vault
+ installation contains a policy called `default`, it will not be overridden,
+ but it will be added to each new token created. You can override this
+ behavior when using manual token creation (i.e. not via an authentication
+ backend) by setting the `no_default_policy` flag to true, as shown below.
+ [GH-732]
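+
+A sketch of opting a manually created token out of the `default` policy via
+the raw endpoint (exact parameter shapes may vary by version):
+
+```sh
+$ vault write auth/token/create policies="app" no_default_policy=true
+```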
+
+IMPROVEMENTS:
+
+ * api: API client now uses a 60 second timeout instead of indefinite [GH-681]
+ * api: Implement LookupSelf, RenewSelf, and RevokeSelf functions for auth
+ tokens [GH-739]
+ * api: Standardize environment variable reading logic inside the API; the CLI
+ now uses this but can still override via command-line parameters [GH-618]
+ * audit: HMAC-SHA256'd client tokens are now stored with each request entry.
+ Previously they were only displayed at creation time; this allows much
+ better traceability of client actions. [GH-713]
+ * audit: There is now a `sys/audit-hash` endpoint that can be used to generate
+ an HMAC-SHA256'd value from provided data using the given audit backend's
+ salt (see the sketch after this list) [GH-784]
+ * core: The physical storage read cache can now be disabled via
+ "disable_cache" [GH-674]
+ * core: The unsealing process can now be reset midway through (this feature
+ was documented before, but not enabled) [GH-695]
+ * core: Tokens can now renew themselves [GH-455]
+ * core: Base64-encoded PGP keys can be used with the CLI for `init` and
+ `rekey` operations [GH-653]
+ * core: Print version on startup [GH-765]
+ * core: Access to `sys/policy` and `sys/mounts` now uses the normal ACL system
+ instead of requiring a root token [GH-769]
+ * credential/token: Display whether or not a token is an orphan in the output
+ of a lookup call [GH-766]
+ * logical: Allow `.` in path-based variables in many more locations [GH-244]
+ * logical: Responses now contain a "warnings" key containing a list of
+ warnings returned from the server. These are conditions that did not require
+ failing an operation, but of which the client should be aware. [GH-676]
+ * physical/(consul,etcd): Consul and etcd now use a connection pool to limit
+ the number of outstanding operations, improving behavior when a lot of
+ operations must happen at once [GH-677] [GH-780]
+ * physical/consul: The `datacenter` parameter was removed; it could not be
+ effective unless the Vault node (or the Consul node it was connecting to)
+ was in the datacenter specified, in which case it wasn't needed [GH-816]
+ * physical/etcd: Support TLS-encrypted connections and use a connection pool
+ to limit the number of outstanding operations [GH-780]
+ * physical/s3: The S3 endpoint can now be configured, allowing using
+ S3-API-compatible storage solutions [GH-750]
+ * physical/s3: The S3 bucket can now be configured with the `AWS_S3_BUCKET`
+ environment variable [GH-758]
+ * secret/consul: Management tokens can now be created [GH-714]
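+
+A sketch of `sys/audit-hash` (the audit backend mounted at `file/` and the
+`input` parameter name are assumptions):
+
+```sh
+# Returns the HMAC-SHA256 of "foo" using the salt of the audit backend at file/
+$ vault write sys/audit-hash/file input=foo
+```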
+
+BUG FIXES:
+
+ * api: API client now checks for a 301 response for redirects. Vault doesn't
+ generate these, but in certain conditions Go's internal HTTP handler can
+ generate them, leading to client errors.
+ * cli: `token-create` now supports the `ttl` parameter in addition to the
+ deprecated `lease` parameter. [GH-688]
+ * core: Return data from `generic` backends on the last use of a limited-use
+ token [GH-615]
+ * core: Fix upgrade path for leases created in `generic` prior to 0.3 [GH-673]
+ * core: Stale leader entries will now be reaped [GH-679]
+ * core: Fix `mount-tune` on the `auth/token` path not taking effect
+ [GH-688]
+ * core: Fix a potential race condition when (un)sealing the vault with metrics
+ enabled [GH-694]
+ * core: Fix an error that could happen in some failure scenarios where Vault
+ could fail to revert to a clean state [GH-733]
+ * core: Ensure secondary indexes are removed when a lease is expired [GH-749]
+ * core: Ensure rollback manager uses an up-to-date mounts table [GH-771]
+ * everywhere: Don't use http.DefaultClient, as it shares state implicitly and
+ is a source of hard-to-track-down bugs [GH-700]
+ * credential/token: Allow creating orphan tokens via an API path [GH-748]
+ * secret/generic: Validate given duration at write time, not just read time;
+ if stored durations are not parseable, return a warning and the default
+ duration rather than an error [GH-718]
+ * secret/generic: Return 400 instead of 500 when `generic` backend is written
+ to with no data fields [GH-825]
+ * secret/postgresql: Revoke permissions before dropping a user or revocation
+ may fail [GH-699]
+
+MISC:
+
+ * Various documentation fixes and improvements [GH-685] [GH-688] [GH-697]
+ [GH-710] [GH-715] [GH-831]
+
+## 0.3.1 (October 6, 2015)
+
+SECURITY:
+
+ * core: In certain failure scenarios, the full values of requests and
+ responses would be logged [GH-665]
+
+FEATURES:
+
+ * **Settable Maximum Open Connections**: The `mysql` and `postgresql` backends
+ now allow setting the number of maximum open connections to the database,
+ which was previously capped to 2. [GH-661]
+ * **Renewable Tokens for GitHub**: The `github` backend now supports
+ specifying a TTL, enabling renewable tokens. [GH-664]
+
+BUG FIXES:
+
+ * dist: linux-amd64 distribution was dynamically linked [GH-656]
+ * credential/github: Fix acceptance tests [GH-651]
+
+MISC:
+
+ * Various minor documentation fixes and improvements [GH-649] [GH-650]
+ [GH-654] [GH-663]
+
+## 0.3.0 (September 28, 2015)
+
+DEPRECATIONS/CHANGES:
+
+Note: deprecations and breaking changes in upcoming releases are announced
+ahead of time on the "vault-tool" mailing list.
+
+ * **Cookie Authentication Removed**: As of 0.3 the only way to authenticate is
+ via the X-Vault-Token header. Cookie authentication was hard to properly
+ test, could result in browsers/tools/applications saving tokens in plaintext
+ on disk, and other issues. [GH-564]
+ * **Terminology/Field Names**: Vault is transitioning from overloading the
+ term "lease" to mean both "a set of metadata" and "the amount of time the
+ metadata is valid". The latter is now being referred to as TTL (or
+ "lease_duration" for backwards-compatibility); some parts of Vault have
+ already switched to using "ttl" and others will follow in upcoming releases.
+ In particular, the "token", "generic", and "pki" backends accept both "ttl"
+ and "lease" but in 0.4 only "ttl" will be accepted. [GH-528]
+ * **Downgrade Not Supported**: Due to enhancements in the storage subsystem,
+ values written by Vault 0.3+ will not be readable by prior versions
+ of Vault. There are no expected upgrade issues, however, as with all
+ critical infrastructure it is recommended to back up Vault's physical
+ storage before upgrading.
+
+FEATURES:
+
+ * **SSH Backend**: Vault can now be used to delegate SSH access to machines,
+ via a (recommended) One-Time Password approach or by issuing dynamic keys.
+ [GH-385]
+ * **Cubbyhole Backend**: This backend works similarly to the "generic" backend
+ but provides a per-token workspace. This enables some additional
+ authentication workflows (especially for containers) and can be useful to
+ applications to e.g. store local credentials while being restarted or
+ upgraded, rather than persisting to disk. [GH-612]
+ * **Transit Backend Improvements**: The transit backend now allows key
+ rotation and datakey generation. For rotation, data encrypted with previous
+ versions of the keys can still be decrypted, down to a (configurable)
+ minimum previous version; there is a rewrap function for manual upgrades of
+ ciphertext to newer versions. Additionally, the backend now allows
+ generating and returning high-entropy keys of a configurable bitsize
+ suitable for AES and other functions; this is returned wrapped by a named
+ key, or optionally both wrapped and plaintext for immediate use; a sketch
+ follows this list. [GH-626]
+ * **Global and Per-Mount Default/Max TTL Support**: You can now set the
+ default and maximum Time To Live for leases both globally and per-mount.
+ Per-mount settings override global settings. Not all backends honor these
+ settings yet, but the maximum is a hard limit enforced outside the backend.
+ See the documentation for "/sys/mounts/" for details on configuring
+ per-mount TTLs. [GH-469]
+ * **PGP Encryption for Unseal Keys**: When initializing or rotating Vault's
+ master key, PGP/GPG public keys can now be provided. The output keys will be
+ encrypted with the given keys, in order. [GH-570]
+ * **Duo Multifactor Authentication Support**: Backends that support MFA can
+ now use Duo as the mechanism. [GH-464]
+ * **Performance Improvements**: Users of the "generic" backend will see a
+ significant performance improvement as the backend no longer creates leases,
+ although it does return TTLs (global/mount default, or set per-item) as
+ before. [GH-631]
+ * **Codebase Audit**: Vault's codebase was audited by iSEC. (The terms of the
+ audit contract do not allow us to make the results public.) [GH-220]
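+
+A sketch of the rotation and rewrap flow in `transit` (the key name is
+illustrative):
+
+```sh
+# Rotate the named key; older versions remain usable for decryption
+$ vault write -f transit/keys/my-key/rotate
+
+# Re-encrypt existing ciphertext under the newest key version
+$ vault write transit/rewrap/my-key ciphertext=<ciphertext>
+```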
+
+IMPROVEMENTS:
+
+ * audit: Log entries now contain a time field [GH-495]
+ * audit: Obfuscated audit entries now use HMAC-SHA256 instead of SHA1 [GH-627]
+ * backends: Add ability for a cleanup function to be called on backend unmount
+ [GH-608]
+ * config: Allow specifying minimum acceptable TLS version [GH-447]
+ * core: If trying to mount in a location that is already mounted, be more
+ helpful about the error [GH-510]
+ * core: Be more explicit on failure if the issue is invalid JSON [GH-553]
+ * core: Tokens can now revoke themselves [GH-620]
+ * credential/app-id: Give a more specific error when sending a duplicate POST
+ to sys/auth/app-id [GH-392]
+ * credential/github: Support custom API endpoints (e.g. for GitHub Enterprise)
+ [GH-572]
+ * credential/ldap: Add per-user policies and option to login with
+ userPrincipalName [GH-420]
+ * credential/token: Allow root tokens to specify the ID of a token being
+ created from CLI [GH-502]
+ * credential/userpass: Enable renewals for login tokens [GH-623]
+ * scripts: Use /usr/bin/env to find Bash instead of hardcoding [GH-446]
+ * scripts: Use godep for build scripts to use same environment as tests
+ [GH-404]
+ * secret/mysql: Allow reading configuration data [GH-529]
+ * secret/pki: Split `allow_any_name` logic into that and `enforce_hostnames`, to
+ allow for non-hostname values (e.g. for client certificates) [GH-555]
+ * storage/consul: Allow specifying certificates used to talk to Consul
+ [GH-384]
+ * storage/mysql: Allow SSL encrypted connections [GH-439]
+ * storage/s3: Allow using temporary security credentials [GH-433]
+ * telemetry: Put telemetry object in configuration to allow more flexibility
+ [GH-419]
+ * testing: Disable mlock for testing of logical backends so as not to require
+ root [GH-479]
+
+BUG FIXES:
+
+ * audit/file: Do not enable auditing if file permissions are invalid [GH-550]
+ * backends: Allow hyphens in endpoint patterns (fixes AWS and others) [GH-559]
+ * cli: Fixed missing setup of client TLS certificates if no custom CA was
+ provided
+ * cli/read: Do not include a carriage return when using raw field output
+ [GH-624]
+ * core: Bad input data could lead to a panic for that session, rather than
+ returning an error [GH-503]
+ * core: Allow SHA2-384/SHA2-512 hashed certificates [GH-448]
+ * core: Do not return a Secret if there are no uses left on a token (since it
+ will be unable to be used) [GH-615]
+ * core: Code paths that called lookup-self would decrement num_uses and
+ potentially immediately revoke a token [GH-552]
+ * core: Some /sys/ paths would not properly redirect from a standby to the
+ leader [GH-499] [GH-551]
+ * credential/aws: Translate spaces in a token's display name to avoid making
+ IAM unhappy [GH-567]
+ * credential/github: Fix integration failing when a user has more than ten
+ organizations or teams [GH-489]
+ * credential/token: Tokens with sudo access to "auth/token/create" can now use
+ root-only options [GH-629]
+ * secret/cassandra: Work around backwards-incompatible change made in
+ Cassandra 2.2 preventing Vault from properly setting/revoking leases
+ [GH-549]
+ * secret/mysql: Use varbinary instead of varchar to avoid InnoDB/UTF-8 issues
+ [GH-522]
+ * secret/postgres: Explicitly set timezone in connections [GH-597]
+ * storage/etcd: Renew semaphore periodically to prevent leadership flapping
+ [GH-606]
+ * storage/zk: Fix collisions in storage that could lead to data unavailability
+ [GH-411]
+
+MISC:
+
+ * Various documentation fixes and improvements [GH-412] [GH-474] [GH-476]
+ [GH-482] [GH-483] [GH-486] [GH-508] [GH-568] [GH-574] [GH-586] [GH-590]
+ [GH-591] [GH-592] [GH-595] [GH-613] [GH-637]
+ * Less "armon" in stack traces [GH-453]
+ * Sourcegraph integration [GH-456]
+
+## 0.2.0 (July 13, 2015)
+
+FEATURES:
+
+ * **Key Rotation Support**: The `rotate` command can be used to rotate the
+ master encryption key used to write data to the storage (physical) backend.
+ [GH-277]
+ * **Rekey Support**: Rekey can be used to rotate the master key and change the
+ configuration of the unseal keys (number of shares, threshold required); a
+ sketch follows this list. [GH-277]
+ * **New secret backend: `pki`**: Enable Vault to be a certificate authority
+ and generate signed TLS certificates. [GH-310]
+ * **New secret backend: `cassandra`**: Generate dynamic credentials for
+ Cassandra [GH-363]
+ * **New storage backend: `etcd`**: store physical data in etcd [GH-259]
+ [GH-297]
+ * **New storage backend: `s3`**: store physical data in S3. Does not support
+ HA. [GH-242]
+ * **New storage backend: `MySQL`**: store physical data in MySQL. Does not
+ support HA. [GH-324]
+ * `transit` secret backend supports derived keys for per-transaction unique
+ keys [GH-399]
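+
+A sketch of the two new key operations (the rekey flag names are assumptions
+that mirror `vault init`; check `vault rekey -h` for the exact set):
+
+```sh
+# Rotate the encryption key used for the storage backend; no unseal keys needed
+$ vault rotate
+
+# Rekey the master key with a new share configuration
+$ vault rekey -key-shares=5 -key-threshold=3
+```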
+
+IMPROVEMENTS:
+
+ * cli/auth: Enable `cert` method [GH-380]
+ * cli/auth: read input from stdin [GH-250]
+ * cli/read: Ability to read a single field from a secret [GH-257]
+ * cli/write: Add a force flag for writes that require no input
+ * core: allow time duration format in place of seconds for some inputs
+ * core: audit log provides more useful information [GH-360]
+ * core: graceful shutdown for faster HA failover
+ * core: **change policy format** to use explicit globbing [GH-400] Any
+ existing policy in Vault is automatically upgraded to avoid issues. All
+ policy files must be updated for future writes. Adding the explicit glob
+ character `*` to the path specification is all that is required.
+ * core: policy merging to give deny highest precedence [GH-400]
+ * credential/app-id: Protect against timing attack on app-id
+ * credential/cert: Record the common name in the metadata [GH-342]
+ * credential/ldap: Allow TLS verification to be disabled [GH-372]
+ * credential/ldap: More flexible names allowed [GH-245] [GH-379] [GH-367]
+ * credential/userpass: Protect against timing attack on password
+ * credential/userpass: Use bcrypt for password matching
+ * http: response codes improved to reflect the error [GH-366]
+ * http: the `sys/health` endpoint supports `?standbyok` to return 200 on
+ standby [GH-389]
+ * secret/app-id: Support deleting AppID and UserIDs [GH-200]
+ * secret/consul: Fine grained lease control [GH-261]
+ * secret/transit: Decouple raw key from key management endpoint [GH-355]
+ * secret/transit: Upsert named key when encrypt is used [GH-355]
+ * storage/zk: Support for HA configuration [GH-252]
+ * storage/zk: Changing node representation. **Backwards incompatible**.
+ [GH-416]
+
+BUG FIXES:
+
+ * audit/file: fix removing TLS connection state
+ * audit/syslog: fix removing TLS connection state
+ * command/*: commands accepting `k=v` allow blank values
+ * core: Allow building on FreeBSD [GH-365]
+ * core: Fixed various panics when audit logging enabled
+ * core: Lease renewal does not create a redundant lease
+ * core: fixed leases with negative duration [GH-354]
+ * core: token renewal does not create a child token
+ * core: fix panic when lease increment is null [GH-408]
+ * credential/app-id: Salt the paths in storage backend to avoid information
+ leak
+ * credential/cert: Fixing client certificate not being requested
+ * credential/cert: Fixing panic when no certificate match found [GH-361]
+ * http: Accept PUT as POST for sys/auth
+ * http: Accept PUT as POST for sys/mounts [GH-349]
+ * http: Return 503 when sealed [GH-225]
+ * secret/postgres: Username length is now capped to avoid exceeding the limit
+ * server: Do not panic if backend not configured [GH-222]
+ * server: Explicitly check value of `tls_disable` [GH-201]
+ * storage/zk: Fixed issues with version conflicts [GH-190]
+
+MISC:
+
+ * cli/path-help: renamed from `help` to avoid confusion
+
+## 0.1.2 (May 11, 2015)
+
+FEATURES:
+
+ * **New physical backend: `zookeeper`**: store physical data in Zookeeper.
+ HA not supported yet.
+ * **New credential backend: `ldap`**: authenticate using LDAP credentials.
+
+IMPROVEMENTS:
+
+ * core: Auth backends can store internal data about auth creds
+ * audit: display name for auth is shown in logs [GH-176]
+ * command/*: `-insecure` has been renamed to `-tls-skip-verify` [GH-130]
+ * command/*: `VAULT_TOKEN` overrides local stored auth [GH-162]
+ * command/server: environment variables are copy-pastable
+ * credential/app-id: hash of app and user ID are in metadata [GH-176]
+ * http: HTTP API accepts `X-Vault-Token` as auth header [GH-124]
+ * logical/*: Generate help output even if no synopsis specified
+
+BUG FIXES:
+
+ * core: login endpoints should never return secrets
+ * core: Internal data should never be returned from core endpoints
+ * core: defer barrier initialization to as late as possible to avoid error
+ cases during init that corrupt data (no data loss)
+ * core: guard against invalid init config earlier
+ * audit/file: create file if it doesn't exist [GH-148]
+ * command/*: ignore directories when traversing CA paths [GH-181]
+ * credential/*: all policy mapping keys are case insensitive [GH-163]
+ * physical/consul: Fixing path for locking so HA works in every case
+
+## 0.1.1 (May 2, 2015)
+
+SECURITY CHANGES:
+
+ * physical/file: create the storage with 0600 permissions [GH-102]
+ * token/disk: write the token to disk with 0600 perms
+
+IMPROVEMENTS:
+
+ * core: Very verbose error if mlock fails [GH-59]
+ * command/*: On error with TLS oversized record, show more human-friendly
+ error message. [GH-123]
+ * command/read: `lease_renewable` is now output along with the secret to
+ show whether it is renewable or not
+ * command/server: Add configuration option to disable mlock
+ * command/server: Disable mlock for dev mode so it works on more systems
+
+BUG FIXES:
+
+ * core: if token helper isn't absolute, prepend with path to Vault
+ executable, not "vault" (which requires PATH) [GH-60]
+ * core: Any "mapping" routes allow hyphens in keys [GH-119]
+ * core: Validate `advertise_addr` is a valid URL with scheme [GH-106]
+ * command/auth: Using an invalid token won't crash [GH-75]
+ * credential/app-id: app and user IDs can have hyphens in keys [GH-119]
+ * helper/password: import proper DLL for Windows to ask password [GH-83]
+
+## 0.1.0 (April 28, 2015)
+
+ * Initial release
diff --git a/vendor/github.com/hashicorp/vault/CONTRIBUTING.md b/vendor/github.com/hashicorp/vault/CONTRIBUTING.md
new file mode 100644
index 0000000..6fc1888
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/CONTRIBUTING.md
@@ -0,0 +1,72 @@
+# Contributing to Vault
+
+**Please note:** We take Vault's security and our users' trust very seriously.
+If you believe you have found a security issue in Vault, please responsibly
+disclose by contacting us at security@hashicorp.com.
+
+**First:** if you're unsure or afraid of _anything_, just ask or submit the
+issue or pull request anyway. You won't be yelled at for giving it your best
+effort. The worst that can happen is that you'll be politely asked to change
+something. We appreciate any sort of contributions, and don't want a wall of
+rules to get in the way of that.
+
+That said, if you want to ensure that a pull request is likely to be merged,
+talk to us! You can find out our thoughts and ensure that your contribution
+won't clash or be obviated by Vault's normal direction. A great way to do this
+is via the [Vault Google Group][2]. Sometimes Vault devs are in `#vault-tool`
+on Freenode, too.
+
+This document will cover what we're looking for in terms of reporting issues.
+By addressing all the points we're looking for, it raises the chances we can
+quickly merge or address your contributions.
+
+## Issues
+
+### Reporting an Issue
+
+* Make sure you test against the latest released version. It is possible
+ we already fixed the bug you're experiencing. Even better is if you can test
+ against `master`, as bugs are fixed regularly but new versions are only
+ released every few months.
+
+* Provide steps to reproduce the issue, and if possible include the expected
+ results as well as the actual results. Please provide text, not screenshots!
+
+* If you are seeing an internal Vault error (a status code of 5xx), please be
+ sure to post relevant parts of (or the entire) Vault log, as often these
+ errors are logged on the server but not reported to the user
+
+* If you experienced a panic, please create a [gist](https://gist.github.com)
+ of the *entire* generated crash log for us to look at. Double check
+ no sensitive items were in the log.
+
+* Respond as promptly as possible to any questions the Vault team raises on
+ your issue. Stale issues will be closed periodically.
+
+### Issue Lifecycle
+
+1. The issue is reported.
+
+2. The issue is verified and categorized by a Vault collaborator.
+ Categorization is done via tags. For example, bugs are marked as "bugs".
+
+3. Unless it is critical, the issue may be left for a period of time (sometimes
+ many weeks), giving outside contributors -- maybe you!? -- a chance to
+ address the issue.
+
+4. The issue is addressed in a pull request or commit. The issue will be
+ referenced in the commit message so that the code that fixes it is clearly
+ linked.
+
+5. The issue is closed. Sometimes, valid issues will be closed to keep
+ the issue tracker clean. The issue is still indexed and available for
+ future viewers, or can be re-opened if necessary.
+
+## Setting up Go to work on Vault
+
+If you have never worked with Go before, you will have to complete the steps
+listed in the README, under the section [Developing Vault][1].
+
+
+[1]: https://github.com/hashicorp/vault#developing-vault
+[2]: https://groups.google.com/group/vault-tool
diff --git a/vendor/github.com/hashicorp/vault/Makefile b/vendor/github.com/hashicorp/vault/Makefile
new file mode 100644
index 0000000..52ab43c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/Makefile
@@ -0,0 +1,74 @@
+TEST?=$$(go list ./... | grep -v /vendor/)
+VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
+EXTERNAL_TOOLS=\
+ github.com/mitchellh/gox
+BUILD_TAGS?=vault
+
+default: dev
+
+# bin generates the releasable binaries for Vault
+bin: generate
+ @CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/build.sh'"
+
+# dev creates binaries for testing Vault locally. These are put
+# into ./bin/ as well as $GOPATH/bin, except for quickdev which
+# is only put into ./bin/
+quickdev: generate
+ @CGO_ENABLED=0 go build -i -tags='$(BUILD_TAGS)' -o bin/vault
+dev: generate
+ @CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
+dev-dynamic: generate
+ @CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
+
+# test runs the unit tests and vets the code
+test: generate
+ CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=20m -parallel=4
+
+testcompile: generate
+ @for pkg in $(TEST) ; do \
+ go test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \
+ done
+
+# testacc runs acceptance tests
+testacc: generate
+ @if [ "$(TEST)" = "./..." ]; then \
+ echo "ERROR: Set TEST to a specific package"; \
+ exit 1; \
+ fi
+ VAULT_ACC=1 go test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout 45m
+
+# testrace runs the race checker
+testrace: generate
+ CGO_ENABLED=1 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS) -timeout=20m -parallel=4
+
+cover:
+ ./scripts/coverage.sh --html
+
+# vet runs the Go source code static analysis tool `vet` to find
+# any common errors.
+vet:
+ @go list -f '{{.Dir}}' ./... | grep -v /vendor/ \
+ | grep -v '.*github.com/hashicorp/vault$$' \
+ | xargs go tool vet ; if [ $$? -eq 1 ]; then \
+ echo ""; \
+ echo "Vet found suspicious constructs. Please check the reported constructs"; \
+ echo "and fix them if necessary before submitting the code for reviewal."; \
+ fi
+
+# generate runs `go generate` to build the dynamically generated
+# source files.
+generate:
+	go generate $$(go list ./... | grep -v /vendor/)
+
+# bootstrap the build by downloading additional tools
+bootstrap:
+ @for tool in $(EXTERNAL_TOOLS) ; do \
+ echo "Installing $$tool" ; \
+ go get -u $$tool; \
+ done
+
+proto:
+ protoc -I helper/forwarding -I vault -I ../../.. vault/*.proto --go_out=plugins=grpc:vault
+ protoc -I helper/forwarding -I vault -I ../../.. helper/forwarding/types.proto --go_out=plugins=grpc:helper/forwarding
+
+.PHONY: bin default dev quickdev dev-dynamic generate test testcompile testacc testrace cover vet bootstrap proto
diff --git a/vendor/github.com/hashicorp/vault/README.md b/vendor/github.com/hashicorp/vault/README.md
new file mode 100644
index 0000000..61b2bb42
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/README.md
@@ -0,0 +1,130 @@
+Vault [![Build Status](https://travis-ci.org/hashicorp/vault.svg)](https://travis-ci.org/hashicorp/vault) [![Join the chat at https://gitter.im/hashicorp-vault/Lobby](https://badges.gitter.im/hashicorp-vault/Lobby.svg)](https://gitter.im/hashicorp-vault/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+=========
+**Please note**: We take Vault's security and our users' trust very seriously. If you believe you have found a security issue in Vault, _please responsibly disclose_ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com).
+
+=========
+
+- Website: https://www.vaultproject.io
+- IRC: `#vault-tool` on Freenode
+- Announcement list: [Google Groups](https://groups.google.com/group/hashicorp-announce)
+- Discussion list: [Google Groups](https://groups.google.com/group/vault-tool)
+
+
+
+Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log.
+
+A modern system requires access to a multitude of secrets: database credentials, API keys for external services, credentials for service-oriented architecture communication, etc. Understanding who is accessing what secrets is already very difficult and platform-specific. Adding on key rolling, secure storage, and detailed audit logs is almost impossible without a custom solution. This is where Vault steps in.
+
+The key features of Vault are:
+
+* **Secure Secret Storage**: Arbitrary key/value secrets can be stored
+ in Vault. Vault encrypts these secrets prior to writing them to persistent
+ storage, so gaining access to the raw storage isn't enough to access
+ your secrets. Vault can write to disk, [Consul](https://www.consul.io),
+ and more.
+
+* **Dynamic Secrets**: Vault can generate secrets on-demand for some
+ systems, such as AWS or SQL databases. For example, when an application
+ needs to access an S3 bucket, it asks Vault for credentials, and Vault
+ will generate an AWS keypair with valid permissions on demand. After
+ creating these dynamic secrets, Vault will also automatically revoke them
+ after the lease is up (see the Go sketch after this list).
+
+* **Data Encryption**: Vault can encrypt and decrypt data without storing
+ it. This allows security teams to define encryption parameters and
+ developers to store encrypted data in a location such as SQL without
+ having to design their own encryption methods.
+
+* **Leasing and Renewal**: All secrets in Vault have a _lease_ associated
+ with them. At the end of the lease, Vault will automatically revoke that
+ secret. Clients are able to renew leases via built-in renew APIs.
+
+* **Revocation**: Vault has built-in support for secret revocation. Vault
+ can revoke not only single secrets, but a tree of secrets, for example
+ all secrets read by a specific user, or all secrets of a particular type.
+ Revocation assists in key rolling as well as locking down systems in the
+ case of an intrusion.
+
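+As a minimal sketch of the dynamic-secrets flow using the Go `api` client
+vendored in this change (the `aws` mount and `my-role` role are hypothetical):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	// DefaultConfig honors VAULT_ADDR; NewClient picks up VAULT_TOKEN.
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Reading a dynamic-secret path mints fresh credentials with a lease.
+	secret, err := client.Logical().Read("aws/creds/my-role")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(secret.LeaseID, secret.Data["access_key"])
+}
+```
+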
+For more information, see the [introduction section](https://www.vaultproject.io/intro)
+of the Vault website.
+
+Getting Started & Documentation
+-------------------------------
+
+All documentation is available on the [Vault website](https://www.vaultproject.io).
+
+Developing Vault
+--------------------
+
+If you wish to work on Vault itself or any of its built-in systems, you'll
+first need [Go](https://www.golang.org) installed on your machine (version 1.8+
+is *required*).
+
+For local development, first make sure Go is properly installed, including setting up a
+[GOPATH](https://golang.org/doc/code.html#GOPATH). Next, clone this repository
+into `$GOPATH/src/github.com/hashicorp/vault`. You can then download any
+required build tools by bootstrapping your environment:
+
+```sh
+$ make bootstrap
+...
+```
+
+To compile a development version of Vault, run `make` or `make dev`. This will
+put the Vault binary in the `bin` and `$GOPATH/bin` folders:
+
+```sh
+$ make dev
+...
+$ bin/vault
+...
+```
+
+To run tests, type `make test`. Note: this requires Docker to be installed. If
+this exits with exit status 0, then everything is working!
+
+```sh
+$ make test
+...
+```
+
+If you're developing a specific package, you can run tests for just that
+package by specifying the `TEST` variable. In the example below, only the
+`vault` package's tests are run.
+
+```sh
+$ make test TEST=./vault
+...
+```
+
+### Acceptance Tests
+
+Vault has comprehensive [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing)
+covering most of the features of the secret and auth backends.
+
+If you're working on a feature of a secret or auth backend and want to
+verify it is functioning (and also hasn't broken anything else), we recommend
+running the acceptance tests.
+
+**Warning:** The acceptance tests create/destroy/modify *real resources*, which
+may incur real costs in some cases. In the presence of a bug, it is technically
+possible that broken backends could leave dangling data behind. Therefore,
+please run the acceptance tests at your own risk. At the very least,
+we recommend running them in their own private account for whatever backend
+you're testing.
+
+To run the acceptance tests, invoke `make testacc`:
+
+```sh
+$ make testacc TEST=./builtin/logical/consul
+...
+```
+
+The `TEST` variable is required and should point at the folder containing the
+backend. Setting `TESTARGS` is recommended to filter down to a specific
+resource to test (for example, `TESTARGS='-run TestBackend'`), since testing
+all of them at once can take a very long time.
+
+Acceptance tests typically require other environment variables to be set for
+things such as access keys. The test itself should error early and tell
+you what to set, so those variables are not documented here.
diff --git a/vendor/github.com/hashicorp/vault/api/api_test.go b/vendor/github.com/hashicorp/vault/api/api_test.go
new file mode 100644
index 0000000..d9059ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/api_test.go
@@ -0,0 +1,31 @@
+package api
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "testing"
+
+ "golang.org/x/net/http2"
+)
+
+// testHTTPServer creates a test HTTP server that handles requests until
+// the listener returned is closed.
+func testHTTPServer(
+ t *testing.T, handler http.Handler) (*Config, net.Listener) {
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ server := &http.Server{Handler: handler}
+ if err := http2.ConfigureServer(server, nil); err != nil {
+ t.Fatal(err)
+ }
+ go server.Serve(ln)
+
+ config := DefaultConfig()
+ config.Address = fmt.Sprintf("http://%s", ln.Addr())
+
+ return config, ln
+}
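+
+// Example usage (illustrative), mirroring the client tests in this package:
+//
+//	config, ln := testHTTPServer(t, http.HandlerFunc(handler))
+//	defer ln.Close()
+//	client, err := NewClient(config)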
diff --git a/vendor/github.com/hashicorp/vault/api/client_test.go b/vendor/github.com/hashicorp/vault/api/client_test.go
new file mode 100644
index 0000000..f95d795
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/client_test.go
@@ -0,0 +1,162 @@
+package api
+
+import (
+ "bytes"
+ "io"
+ "net/http"
+ "os"
+ "testing"
+)
+
+func init() {
+ // Ensure our special envvars are not present
+ os.Setenv("VAULT_ADDR", "")
+ os.Setenv("VAULT_TOKEN", "")
+}
+
+func TestDefaultConfig_envvar(t *testing.T) {
+ os.Setenv("VAULT_ADDR", "https://vault.mycompany.com")
+ defer os.Setenv("VAULT_ADDR", "")
+
+ config := DefaultConfig()
+ if config.Address != "https://vault.mycompany.com" {
+ t.Fatalf("bad: %s", config.Address)
+ }
+
+ os.Setenv("VAULT_TOKEN", "testing")
+ defer os.Setenv("VAULT_TOKEN", "")
+
+ client, err := NewClient(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if token := client.Token(); token != "testing" {
+ t.Fatalf("bad: %s", token)
+ }
+}
+
+func TestClientNilConfig(t *testing.T) {
+ client, err := NewClient(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if client == nil {
+ t.Fatal("expected a non-nil client")
+ }
+}
+
+func TestClientSetAddress(t *testing.T) {
+ client, err := NewClient(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := client.SetAddress("http://172.168.2.1:8300"); err != nil {
+ t.Fatal(err)
+ }
+ if client.addr.Host != "172.168.2.1:8300" {
+ t.Fatalf("bad: expected: '172.168.2.1:8300' actual: %q", client.addr.Host)
+ }
+}
+
+func TestClientToken(t *testing.T) {
+ tokenValue := "foo"
+ handler := func(w http.ResponseWriter, req *http.Request) {}
+
+ config, ln := testHTTPServer(t, http.HandlerFunc(handler))
+ defer ln.Close()
+
+ client, err := NewClient(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ client.SetToken(tokenValue)
+
+ // Verify the token is set
+ if v := client.Token(); v != tokenValue {
+ t.Fatalf("bad: %s", v)
+ }
+
+ client.ClearToken()
+
+ if v := client.Token(); v != "" {
+ t.Fatalf("bad: %s", v)
+ }
+}
+
+func TestClientRedirect(t *testing.T) {
+ primary := func(w http.ResponseWriter, req *http.Request) {
+ w.Write([]byte("test"))
+ }
+ config, ln := testHTTPServer(t, http.HandlerFunc(primary))
+ defer ln.Close()
+
+ standby := func(w http.ResponseWriter, req *http.Request) {
+ w.Header().Set("Location", config.Address)
+ w.WriteHeader(307)
+ }
+ config2, ln2 := testHTTPServer(t, http.HandlerFunc(standby))
+ defer ln2.Close()
+
+ client, err := NewClient(config2)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Set the token manually
+ client.SetToken("foo")
+
+ // Do a raw "/" request
+ resp, err := client.RawRequest(client.NewRequest("PUT", "/"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Copy the response
+ var buf bytes.Buffer
+ io.Copy(&buf, resp.Body)
+
+ // Verify we got the response from the primary
+ if buf.String() != "test" {
+ t.Fatalf("Bad: %s", buf.String())
+ }
+}
+
+func TestClientEnvSettings(t *testing.T) {
+ cwd, _ := os.Getwd()
+ oldCACert := os.Getenv(EnvVaultCACert)
+ oldCAPath := os.Getenv(EnvVaultCAPath)
+ oldClientCert := os.Getenv(EnvVaultClientCert)
+ oldClientKey := os.Getenv(EnvVaultClientKey)
+ oldSkipVerify := os.Getenv(EnvVaultInsecure)
+ oldMaxRetries := os.Getenv(EnvVaultMaxRetries)
+ os.Setenv(EnvVaultCACert, cwd+"/test-fixtures/keys/cert.pem")
+ os.Setenv(EnvVaultCAPath, cwd+"/test-fixtures/keys")
+ os.Setenv(EnvVaultClientCert, cwd+"/test-fixtures/keys/cert.pem")
+ os.Setenv(EnvVaultClientKey, cwd+"/test-fixtures/keys/key.pem")
+ os.Setenv(EnvVaultInsecure, "true")
+ os.Setenv(EnvVaultMaxRetries, "5")
+ defer os.Setenv(EnvVaultCACert, oldCACert)
+ defer os.Setenv(EnvVaultCAPath, oldCAPath)
+ defer os.Setenv(EnvVaultClientCert, oldClientCert)
+ defer os.Setenv(EnvVaultClientKey, oldClientKey)
+ defer os.Setenv(EnvVaultInsecure, oldSkipVerify)
+ defer os.Setenv(EnvVaultMaxRetries, oldMaxRetries)
+
+ config := DefaultConfig()
+ if err := config.ReadEnvironment(); err != nil {
+ t.Fatalf("error reading environment: %v", err)
+ }
+
+ tlsConfig := config.HttpClient.Transport.(*http.Transport).TLSClientConfig
+ if len(tlsConfig.RootCAs.Subjects()) == 0 {
+ t.Fatalf("bad: expected a cert pool with at least one subject")
+ }
+ if len(tlsConfig.Certificates) != 1 {
+ t.Fatalf("bad: expected client tls config to have a client certificate")
+ }
+ if tlsConfig.InsecureSkipVerify != true {
+ t.Fatalf("bad: %v", tlsConfig.InsecureSkipVerify)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/api/request_test.go b/vendor/github.com/hashicorp/vault/api/request_test.go
new file mode 100644
index 0000000..904f59a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/request_test.go
@@ -0,0 +1,63 @@
+package api
+
+import (
+ "bytes"
+ "io"
+ "strings"
+ "testing"
+)
+
+func TestRequestSetJSONBody(t *testing.T) {
+ var r Request
+ raw := map[string]interface{}{"foo": "bar"}
+ if err := r.SetJSONBody(raw); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, r.Body); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := `{"foo":"bar"}`
+ actual := strings.TrimSpace(buf.String())
+ if actual != expected {
+ t.Fatalf("bad: %s", actual)
+ }
+
+ if int64(len(buf.String())) != r.BodySize {
+ t.Fatalf("bad: %d", len(actual))
+ }
+}
+
+func TestRequestResetJSONBody(t *testing.T) {
+ var r Request
+ raw := map[string]interface{}{"foo": "bar"}
+ if err := r.SetJSONBody(raw); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, r.Body); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := r.ResetJSONBody(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var buf2 bytes.Buffer
+ if _, err := io.Copy(&buf2, r.Body); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := `{"foo":"bar"}`
+ actual := strings.TrimSpace(buf2.String())
+ if actual != expected {
+ t.Fatalf("bad: %s", actual)
+ }
+
+ if int64(len(buf2.String())) != r.BodySize {
+ t.Fatalf("bad: %d", len(actual))
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/api/secret_test.go b/vendor/github.com/hashicorp/vault/api/secret_test.go
new file mode 100644
index 0000000..3b64966
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/secret_test.go
@@ -0,0 +1,57 @@
+package api
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestParseSecret(t *testing.T) {
+ raw := strings.TrimSpace(`
+{
+ "lease_id": "foo",
+ "renewable": true,
+ "lease_duration": 10,
+ "data": {
+ "key": "value"
+ },
+ "warnings": [
+ "a warning!"
+ ],
+ "wrap_info": {
+ "token": "token",
+ "ttl": 60,
+ "creation_time": "2016-06-07T15:52:10-04:00",
+ "wrapped_accessor": "abcd1234"
+ }
+}`)
+
+ rawTime, _ := time.Parse(time.RFC3339, "2016-06-07T15:52:10-04:00")
+
+ secret, err := ParseSecret(strings.NewReader(raw))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := &Secret{
+ LeaseID: "foo",
+ Renewable: true,
+ LeaseDuration: 10,
+ Data: map[string]interface{}{
+ "key": "value",
+ },
+ Warnings: []string{
+ "a warning!",
+ },
+ WrapInfo: &SecretWrapInfo{
+ Token: "token",
+ TTL: 60,
+ CreationTime: rawTime,
+ WrappedAccessor: "abcd1234",
+ },
+ }
+ if !reflect.DeepEqual(secret, expected) {
+ t.Fatalf("bad:\ngot\n%#v\nexpected\n%#v\n", secret, expected)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent_test.go b/vendor/github.com/hashicorp/vault/api/ssh_agent_test.go
new file mode 100644
index 0000000..dfef4b8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/ssh_agent_test.go
@@ -0,0 +1,110 @@
+package api
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+ "testing"
+)
+
+func TestSSH_CreateTLSClient(t *testing.T) {
+ // load the default configuration
+ config, err := LoadSSHHelperConfig("./test-fixtures/agent_config.hcl")
+ if err != nil {
+ panic(fmt.Sprintf("error loading agent's config file: %s", err))
+ }
+
+ client, err := config.NewClient()
+ if err != nil {
+ panic(fmt.Sprintf("error creating the client: %s", err))
+ }
+
+ // Provide a certificate and enforce setting of transport
+ config.CACert = "./test-fixtures/vault.crt"
+
+ client, err = config.NewClient()
+ if err != nil {
+ panic(fmt.Sprintf("error creating the client: %s", err))
+ }
+ if client.config.HttpClient.Transport == nil {
+ panic(fmt.Sprintf("error creating client with TLS transport"))
+ }
+}
+
+func TestSSH_CreateTLSClient_tlsServerName(t *testing.T) {
+ // Ensure that the HTTP client is associated with the configured TLS server name.
+ var tlsServerName = "tls.server.name"
+
+ config, err := ParseSSHHelperConfig(fmt.Sprintf(`
+vault_addr = "1.2.3.4"
+tls_server_name = "%s"
+`, tlsServerName))
+ if err != nil {
+ panic(fmt.Sprintf("error loading config: %s", err))
+ }
+
+ client, err := config.NewClient()
+ if err != nil {
+ panic(fmt.Sprintf("error creating the client: %s", err))
+ }
+
+ actualTLSServerName := client.config.HttpClient.Transport.(*http.Transport).TLSClientConfig.ServerName
+ if actualTLSServerName != tlsServerName {
+ panic(fmt.Sprintf("incorrect TLS server name. expected: %s actual: %s", tlsServerName, actualTLSServerName))
+ }
+}
+
+func TestParseSSHHelperConfig(t *testing.T) {
+ config, err := ParseSSHHelperConfig(`
+ vault_addr = "1.2.3.4"
+`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if config.SSHMountPoint != SSHHelperDefaultMountPoint {
+ t.Errorf("expected %q to be %q", config.SSHMountPoint, SSHHelperDefaultMountPoint)
+ }
+}
+
+func TestParseSSHHelperConfig_missingVaultAddr(t *testing.T) {
+ _, err := ParseSSHHelperConfig("")
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ if !strings.Contains(err.Error(), "ssh_helper: missing config 'vault_addr'") {
+ t.Errorf("bad error: %s", err)
+ }
+}
+
+func TestParseSSHHelperConfig_badKeys(t *testing.T) {
+ _, err := ParseSSHHelperConfig(`
+vault_addr = "1.2.3.4"
+nope = "bad"
+`)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ if !strings.Contains(err.Error(), "ssh_helper: invalid key 'nope' on line 3") {
+ t.Errorf("bad error: %s", err)
+ }
+}
+
+func TestParseSSHHelperConfig_tlsServerName(t *testing.T) {
+ var tlsServerName = "tls.server.name"
+
+ config, err := ParseSSHHelperConfig(fmt.Sprintf(`
+vault_addr = "1.2.3.4"
+tls_server_name = "%s"
+`, tlsServerName))
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if config.TLSServerName != tlsServerName {
+ t.Errorf("incorrect TLS server name. expected: %s actual: %s", tlsServerName, config.TLSServerName)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/cert.pem b/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/cert.pem
new file mode 100644
index 0000000..942d266
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/cert.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw
+MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq
+hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS
+TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn
+SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi
+YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5
+donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG
+B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1
+MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e
+HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o
+k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x
+OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A
+AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br
+aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs
+X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4
+aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA
+KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN
+QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj
+xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk=
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/key.pem b/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/key.pem
new file mode 100644
index 0000000..add9820
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu
+HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA
+6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N
+TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd
+y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2
+DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX
+9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF
+RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd
+rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI
+5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7
+oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ
+GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb
+VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR
+akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI
+FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy
+efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh
+r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ
+0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp
+FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR
+kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT
+UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3
+xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W
+injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU
+2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3
+gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4=
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/pkioutput b/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/pkioutput
new file mode 100644
index 0000000..526ff03
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/pkioutput
@@ -0,0 +1,74 @@
+Key Value
+lease_id pki/issue/example-dot-com/d8214077-9976-8c68-9c07-6610da30aea4
+lease_duration 279359999
+lease_renewable false
+certificate -----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw
+MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq
+hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS
+TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn
+SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi
+YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5
+donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG
+B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1
+MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e
+HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o
+k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x
+OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A
+AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br
+aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs
+X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4
+aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA
+KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN
+QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj
+xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk=
+-----END CERTIFICATE-----
+issuing_ca -----BEGIN CERTIFICATE-----
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
+MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
+Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
+AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
+6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
+SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
+7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
+BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
+ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
+t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
+zehNe5dFTjFpylg1o6b8Ow==
+-----END CERTIFICATE-----
+private_key -----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu
+HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA
+6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N
+TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd
+y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2
+DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX
+9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF
+RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd
+rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI
+5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7
+oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ
+GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb
+VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR
+akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI
+FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy
+efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh
+r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ
+0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp
+FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR
+kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT
+UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3
+xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W
+injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU
+2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3
+gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4=
+-----END RSA PRIVATE KEY-----
+private_key_type rsa
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/root/pkioutput b/vendor/github.com/hashicorp/vault/api/test-fixtures/root/pkioutput
new file mode 100644
index 0000000..312ae18
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/test-fixtures/root/pkioutput
@@ -0,0 +1,74 @@
+Key Value
+lease_id pki/root/generate/exported/7bf99d76-dd3e-2c5b-04ce-5253062ad586
+lease_duration 315359999
+lease_renewable false
+certificate -----BEGIN CERTIFICATE-----
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
+MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
+Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
+AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
+6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
+SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
+7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
+BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
+ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
+t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
+zehNe5dFTjFpylg1o6b8Ow==
+-----END CERTIFICATE-----
+expiration 1.772072879e+09
+issuing_ca -----BEGIN CERTIFICATE-----
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
+MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
+Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
+AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
+6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
+SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
+7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
+BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
+ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
+t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
+zehNe5dFTjFpylg1o6b8Ow==
+-----END CERTIFICATE-----
+private_key -----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p
+t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3
+BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w
+/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv
+0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi
+18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb
+ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn
+8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f
+nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8
+2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t
+grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc
+bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9
+0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN
+ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf
+lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1
+lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj
+AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG
+ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib
+thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU
+4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb
+iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO
+tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y
+LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc
+4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX
+OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8=
+-----END RSA PRIVATE KEY-----
+private_key_type rsa
+serial_number 6f:98:9d:f8:67:1a:31:e3:27:60:1b:f7:32:f7:53:19:68:a0:c8:9d
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/root/root.crl b/vendor/github.com/hashicorp/vault/api/test-fixtures/root/root.crl
new file mode 100644
index 0000000..a80c9e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/test-fixtures/root/root.crl
@@ -0,0 +1,12 @@
+-----BEGIN X509 CRL-----
+MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbRcN
+MTYwMjI5MDIyOTE3WhcNMjUwMTA1MTAyOTE3WjArMCkCFG+YnfhnGjHjJ2Ab9zL3
+UxlooMidFxExNjAyMjgyMTI5MTctMDUwMKAjMCEwHwYDVR0jBBgwFoAUncSzT/6H
+MexyuiU9/7EgHu+ok5swDQYJKoZIhvcNAQELBQADggEBAG9YDXpNe4LJroKZmVCn
+HqMhW8eyzyaPak2nPPGCVUnc6vt8rlBYQU+xlBizD6xatZQDMPgrT8sBl9W3ysXk
+RUlliHsT/SHddMz5dAZsBPRMJ7pYWLTx8jI4w2WRfbSyI4bY/6qTRNkEBUv+Fk8J
+xvwB89+EM0ENcVMhv9ghsUA8h7kOg673HKwRstLDAzxS/uLmEzFjj8SV2m5DbV2Y
+UUCKRSV20/kxJMIC9x2KikZhwOSyv1UE1otD+RQvbfAoZPUDmvp2FR/E0NGjBBOg
+1TtCPRrl63cjqU3s8KQ4uah9Vj+Cwcu9n/yIKKtNQq4NKHvagv8GlUsoJ4BdAxCw
+IA0=
+-----END X509 CRL-----
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/root/rootcacert.pem b/vendor/github.com/hashicorp/vault/api/test-fixtures/root/rootcacert.pem
new file mode 100644
index 0000000..dcb307a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/test-fixtures/root/rootcacert.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
+MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
+Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
+AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
+6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
+SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
+7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
+BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
+ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
+t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
+zehNe5dFTjFpylg1o6b8Ow==
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/root/rootcakey.pem b/vendor/github.com/hashicorp/vault/api/test-fixtures/root/rootcakey.pem
new file mode 100644
index 0000000..e950da5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/test-fixtures/root/rootcakey.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p
+t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3
+BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w
+/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv
+0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi
+18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb
+ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn
+8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f
+nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8
+2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t
+grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc
+bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9
+0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN
+ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf
+lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1
+lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj
+AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG
+ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib
+thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU
+4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb
+iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO
+tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y
+LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc
+4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX
+OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8=
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/vault.crt b/vendor/github.com/hashicorp/vault/api/test-fixtures/vault.crt
new file mode 100644
index 0000000..3e34cf1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/test-fixtures/vault.crt
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIIEEjCCAvqgAwIBAgIJAM7PFmA6Y+KeMA0GCSqGSIb3DQEBCwUAMIGWMQswCQYD
+VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFDASBgNVBAcMC1N0b255IEJyb29r
+MRIwEAYDVQQKDAlIYXNoaUNvcnAxDjAMBgNVBAsMBVZhdWx0MRUwEwYDVQQDDAxW
+aXNoYWwgTmF5YWsxIzAhBgkqhkiG9w0BCQEWFHZpc2hhbEBoYXNoaWNvcnAuY29t
+MB4XDTE1MDgwNzE5MTk1OFoXDTE1MDkwNjE5MTk1OFowgZYxCzAJBgNVBAYTAlVT
+MREwDwYDVQQIDAhOZXcgWW9yazEUMBIGA1UEBwwLU3RvbnkgQnJvb2sxEjAQBgNV
+BAoMCUhhc2hpQ29ycDEOMAwGA1UECwwFVmF1bHQxFTATBgNVBAMMDFZpc2hhbCBO
+YXlhazEjMCEGCSqGSIb3DQEJARYUdmlzaGFsQGhhc2hpY29ycC5jb20wggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCcGlPKIrsq5sDJAUB7mtLjnjbcfR0b
+dX1sDHUaTdT+2YBq0JvtoLZOmKw1iVwsMBhaLeXwnKP/O/n67sE8zvZPsuU3REw1
+NTjPof8IbepkENWNxR68KoSB2Vn5r4KiO3ux+KbkXssrZB62+k9khj0e7qIiwyZP
+y5+RQPOL2ESmX5DznX+90vH4mzAEF654PbXFI/qOBZcWvWZJ37i+lHkeyCqcB+sm
+5o5+zd1ua8jVlN0eLjyqa7FDvIuXPAFEX+r5DVQgIvS2++YaFRqTFCIxRXdDQXdw
+1xDMCuG1w4PGVWf3TtlpHeGSIU07DdrCgXsvIRYfW++aZ2pvXwJYCr8hAgMBAAGj
+YTBfMA8GA1UdEQQIMAaHBKwYFugwHQYDVR0OBBYEFPl+AkButpRfbblZE9Jb3xNj
+AyhkMB8GA1UdIwQYMBaAFPl+AkButpRfbblZE9Jb3xNjAyhkMAwGA1UdEwQFMAMB
+Af8wDQYJKoZIhvcNAQELBQADggEBADdIyyBJ3BVghW1shhxYsqQgg/gj2TagpO1P
+ulGNzS0aCfB4tzMD4MGWm7cTlL6QW9W6r9OuWKCd1ADherIX9j0gtVWgIMtWGx+i
+NbHrYin1xHr4rkB7/f6veCiJ3CtzBC9P/rEI6keyfOn1BfQBsOxfo3oGe/HDlSzD
+lpu0GlQECjTXD7dd4jrD0T/wdRQI0BmxcYjn9cZLgoJHtLHZwaS16TGVmKs4iRAW
+V9Aw5hLK4jJ59IID830/ly+Ndfc//QGgdE5PM44OrvVFO3Q8+zs7pwr1ql7uQWew
+MSuDfbL7EcEGajD/o085sj2u4xVUfkVBW+3TQvs4/pHYOxlhPjI=
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/audit/audit.go b/vendor/github.com/hashicorp/vault/audit/audit.go
new file mode 100644
index 0000000..dffa8ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/audit.go
@@ -0,0 +1,43 @@
+package audit
+
+import (
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+)
+
+// Backend interface must be implemented for an audit
+// mechanism to be made available. Audit backends can be enabled to
+// sink information to different backends such as logs, file, databases,
+// or other external services.
+type Backend interface {
+ // LogRequest is used to synchronously log a request. This is done after the
+ // request is authorized but before the request is executed. The arguments
+ // MUST NOT be modified in any way. They should be deep copied if this is
+ // a possibility.
+ LogRequest(*logical.Auth, *logical.Request, error) error
+
+ // LogResponse is used to synchronously log a response. This is done after
+ // the request is processed but before the response is sent. The arguments
+ // MUST NOT be modified in any way. They should be deep copied if this is
+ // a possibility.
+ LogResponse(*logical.Auth, *logical.Request, *logical.Response, error) error
+
+ // GetHash is used to return the given data with the backend's hash,
+ // so that a caller can determine if a value in the audit log matches
+ // an expected plaintext value
+ GetHash(string) string
+
+ // Reload is called on SIGHUP for supporting backends.
+ Reload() error
+}
+
+type BackendConfig struct {
+ // The salt that should be used for any secret obfuscation
+ Salt *salt.Salt
+
+ // Config is the opaque user configuration provided when mounting
+ Config map[string]string
+}
+
+// Factory is the factory function to create an audit backend.
+type Factory func(*BackendConfig) (Backend, error)
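+
+// Example: a minimal no-op Backend sketch (illustrative only). It satisfies
+// the interface above; a real backend would serialize each entry to its sink
+// and use the salt to HMAC sensitive values.
+//
+//	type noopAudit struct{ salt *salt.Salt }
+//
+//	func (n *noopAudit) LogRequest(*logical.Auth, *logical.Request, error) error { return nil }
+//	func (n *noopAudit) LogResponse(*logical.Auth, *logical.Request, *logical.Response, error) error {
+//		return nil
+//	}
+//	func (n *noopAudit) GetHash(data string) string { return n.salt.GetIdentifiedHMAC(data) }
+//	func (n *noopAudit) Reload() error { return nil }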
diff --git a/vendor/github.com/hashicorp/vault/audit/format.go b/vendor/github.com/hashicorp/vault/audit/format.go
new file mode 100644
index 0000000..919da12
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/format.go
@@ -0,0 +1,410 @@
+package audit
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/SermoDigital/jose/jws"
+ "github.com/hashicorp/vault/logical"
+ "github.com/mitchellh/copystructure"
+)
+
+type AuditFormatWriter interface {
+ WriteRequest(io.Writer, *AuditRequestEntry) error
+ WriteResponse(io.Writer, *AuditResponseEntry) error
+}
+
+// AuditFormatter implements the Formatter interface, and allows the underlying
+// marshaller to be swapped out
+type AuditFormatter struct {
+ AuditFormatWriter
+}
+
+func (f *AuditFormatter) FormatRequest(
+ w io.Writer,
+ config FormatterConfig,
+ auth *logical.Auth,
+ req *logical.Request,
+ inErr error) error {
+
+ if req == nil {
+ return fmt.Errorf("request to request-audit a nil request")
+ }
+
+ if w == nil {
+ return fmt.Errorf("writer for audit request is nil")
+ }
+
+ if f.AuditFormatWriter == nil {
+ return fmt.Errorf("no format writer specified")
+ }
+
+ if !config.Raw {
+ // Before we copy the structure we must nil out some data,
+ // otherwise reflection will panic.
+ if req.Connection != nil && req.Connection.ConnState != nil {
+ origReq := req
+ origState := req.Connection.ConnState
+ req.Connection.ConnState = nil
+ defer func() {
+ origReq.Connection.ConnState = origState
+ }()
+ }
+
+ // Copy the auth structure
+ if auth != nil {
+ cp, err := copystructure.Copy(auth)
+ if err != nil {
+ return err
+ }
+ auth = cp.(*logical.Auth)
+ }
+
+ cp, err := copystructure.Copy(req)
+ if err != nil {
+ return err
+ }
+ req = cp.(*logical.Request)
+
+ // Hash any sensitive information
+ if auth != nil {
+ if err := Hash(config.Salt, auth); err != nil {
+ return err
+ }
+ }
+
+ // Cache and restore accessor in the request
+ var clientTokenAccessor string
+ if !config.HMACAccessor && req != nil && req.ClientTokenAccessor != "" {
+ clientTokenAccessor = req.ClientTokenAccessor
+ }
+ if err := Hash(config.Salt, req); err != nil {
+ return err
+ }
+ if clientTokenAccessor != "" {
+ req.ClientTokenAccessor = clientTokenAccessor
+ }
+ }
+
+ // If auth is nil, make an empty one
+ if auth == nil {
+ auth = new(logical.Auth)
+ }
+ var errString string
+ if inErr != nil {
+ errString = inErr.Error()
+ }
+
+ reqEntry := &AuditRequestEntry{
+ Type: "request",
+ Error: errString,
+
+ Auth: AuditAuth{
+ DisplayName: auth.DisplayName,
+ Policies: auth.Policies,
+ Metadata: auth.Metadata,
+ RemainingUses: req.ClientTokenRemainingUses,
+ },
+
+ Request: AuditRequest{
+ ID: req.ID,
+ ClientToken: req.ClientToken,
+ ClientTokenAccessor: req.ClientTokenAccessor,
+ Operation: req.Operation,
+ Path: req.Path,
+ Data: req.Data,
+ RemoteAddr: getRemoteAddr(req),
+ ReplicationCluster: req.ReplicationCluster,
+ Headers: req.Headers,
+ },
+ }
+
+ if req.WrapInfo != nil {
+ reqEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second)
+ }
+
+ if !config.OmitTime {
+ reqEntry.Time = time.Now().UTC().Format(time.RFC3339)
+ }
+
+ return f.AuditFormatWriter.WriteRequest(w, reqEntry)
+}
+
+func (f *AuditFormatter) FormatResponse(
+ w io.Writer,
+ config FormatterConfig,
+ auth *logical.Auth,
+ req *logical.Request,
+ resp *logical.Response,
+ inErr error) error {
+
+ if req == nil {
+ return fmt.Errorf("request to response-audit a nil request")
+ }
+
+ if w == nil {
+ return fmt.Errorf("writer for audit request is nil")
+ }
+
+ if f.AuditFormatWriter == nil {
+ return fmt.Errorf("no format writer specified")
+ }
+
+ if !config.Raw {
+ // Before we copy the structure we must nil out some data,
+ // otherwise reflection will panic.
+ if req.Connection != nil && req.Connection.ConnState != nil {
+ origReq := req
+ origState := req.Connection.ConnState
+ req.Connection.ConnState = nil
+ defer func() {
+ origReq.Connection.ConnState = origState
+ }()
+ }
+
+ // Copy the auth structure
+ if auth != nil {
+ cp, err := copystructure.Copy(auth)
+ if err != nil {
+ return err
+ }
+ auth = cp.(*logical.Auth)
+ }
+
+ cp, err := copystructure.Copy(req)
+ if err != nil {
+ return err
+ }
+ req = cp.(*logical.Request)
+
+ if resp != nil {
+ cp, err := copystructure.Copy(resp)
+ if err != nil {
+ return err
+ }
+ resp = cp.(*logical.Response)
+ }
+
+ // Hash any sensitive information
+
+ // Cache and restore accessor in the auth
+ if auth != nil {
+ var accessor string
+ if !config.HMACAccessor && auth.Accessor != "" {
+ accessor = auth.Accessor
+ }
+ if err := Hash(config.Salt, auth); err != nil {
+ return err
+ }
+ if accessor != "" {
+ auth.Accessor = accessor
+ }
+ }
+
+ // Cache and restore accessor in the request
+ var clientTokenAccessor string
+ if !config.HMACAccessor && req != nil && req.ClientTokenAccessor != "" {
+ clientTokenAccessor = req.ClientTokenAccessor
+ }
+ if err := Hash(config.Salt, req); err != nil {
+ return err
+ }
+ if clientTokenAccessor != "" {
+ req.ClientTokenAccessor = clientTokenAccessor
+ }
+
+ // Cache and restore accessor in the response
+ if resp != nil {
+ var accessor, wrappedAccessor string
+ if !config.HMACAccessor && resp != nil && resp.Auth != nil && resp.Auth.Accessor != "" {
+ accessor = resp.Auth.Accessor
+ }
+ if !config.HMACAccessor && resp != nil && resp.WrapInfo != nil && resp.WrapInfo.WrappedAccessor != "" {
+ wrappedAccessor = resp.WrapInfo.WrappedAccessor
+ }
+ if err := Hash(config.Salt, resp); err != nil {
+ return err
+ }
+ if accessor != "" {
+ resp.Auth.Accessor = accessor
+ }
+ if wrappedAccessor != "" {
+ resp.WrapInfo.WrappedAccessor = wrappedAccessor
+ }
+ }
+ }
+
+ // If things are nil, make empty to avoid panics
+ if auth == nil {
+ auth = new(logical.Auth)
+ }
+ if resp == nil {
+ resp = new(logical.Response)
+ }
+ var errString string
+ if inErr != nil {
+ errString = inErr.Error()
+ }
+
+ var respAuth *AuditAuth
+ if resp.Auth != nil {
+ respAuth = &AuditAuth{
+ ClientToken: resp.Auth.ClientToken,
+ Accessor: resp.Auth.Accessor,
+ DisplayName: resp.Auth.DisplayName,
+ Policies: resp.Auth.Policies,
+ Metadata: resp.Auth.Metadata,
+ NumUses: resp.Auth.NumUses,
+ }
+ }
+
+ var respSecret *AuditSecret
+ if resp.Secret != nil {
+ respSecret = &AuditSecret{
+ LeaseID: resp.Secret.LeaseID,
+ }
+ }
+
+ var respWrapInfo *AuditResponseWrapInfo
+ if resp.WrapInfo != nil {
+ token := resp.WrapInfo.Token
+ if jwtToken := parseVaultTokenFromJWT(token); jwtToken != nil {
+ token = *jwtToken
+ }
+ respWrapInfo = &AuditResponseWrapInfo{
+ TTL: int(resp.WrapInfo.TTL / time.Second),
+ Token: token,
+ CreationTime: resp.WrapInfo.CreationTime.Format(time.RFC3339Nano),
+ WrappedAccessor: resp.WrapInfo.WrappedAccessor,
+ }
+ }
+
+ respEntry := &AuditResponseEntry{
+ Type: "response",
+ Error: errString,
+
+ Auth: AuditAuth{
+ DisplayName: auth.DisplayName,
+ Policies: auth.Policies,
+ Metadata: auth.Metadata,
+ },
+
+ Request: AuditRequest{
+ ID: req.ID,
+ ClientToken: req.ClientToken,
+ ClientTokenAccessor: req.ClientTokenAccessor,
+ Operation: req.Operation,
+ Path: req.Path,
+ Data: req.Data,
+ RemoteAddr: getRemoteAddr(req),
+ ReplicationCluster: req.ReplicationCluster,
+ Headers: req.Headers,
+ },
+
+ Response: AuditResponse{
+ Auth: respAuth,
+ Secret: respSecret,
+ Data: resp.Data,
+ Redirect: resp.Redirect,
+ WrapInfo: respWrapInfo,
+ },
+ }
+
+ if req.WrapInfo != nil {
+ respEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second)
+ }
+
+ if !config.OmitTime {
+ respEntry.Time = time.Now().UTC().Format(time.RFC3339)
+ }
+
+ return f.AuditFormatWriter.WriteResponse(w, respEntry)
+}
+
+// AuditRequestEntry is the structure of a request audit log entry in Audit.
+type AuditRequestEntry struct {
+ Time string `json:"time,omitempty"`
+ Type string `json:"type"`
+ Auth AuditAuth `json:"auth"`
+ Request AuditRequest `json:"request"`
+ Error string `json:"error"`
+}
+
+// AuditResponseEntry is the structure of a response audit log entry in Audit.
+type AuditResponseEntry struct {
+ Time string `json:"time,omitempty"`
+ Type string `json:"type"`
+ Auth AuditAuth `json:"auth"`
+ Request AuditRequest `json:"request"`
+ Response AuditResponse `json:"response"`
+ Error string `json:"error"`
+}
+
+type AuditRequest struct {
+ ID string `json:"id"`
+ ReplicationCluster string `json:"replication_cluster,omitempty"`
+ Operation logical.Operation `json:"operation"`
+ ClientToken string `json:"client_token"`
+ ClientTokenAccessor string `json:"client_token_accessor"`
+ Path string `json:"path"`
+ Data map[string]interface{} `json:"data"`
+ RemoteAddr string `json:"remote_address"`
+ WrapTTL int `json:"wrap_ttl"`
+ Headers map[string][]string `json:"headers"`
+}
+
+type AuditResponse struct {
+ Auth *AuditAuth `json:"auth,omitempty"`
+ Secret *AuditSecret `json:"secret,omitempty"`
+ Data map[string]interface{} `json:"data,omitempty"`
+ Redirect string `json:"redirect,omitempty"`
+ WrapInfo *AuditResponseWrapInfo `json:"wrap_info,omitempty"`
+}
+
+type AuditAuth struct {
+ ClientToken string `json:"client_token"`
+ Accessor string `json:"accessor"`
+ DisplayName string `json:"display_name"`
+ Policies []string `json:"policies"`
+ Metadata map[string]string `json:"metadata"`
+ NumUses int `json:"num_uses,omitempty"`
+ RemainingUses int `json:"remaining_uses,omitempty"`
+}
+
+type AuditSecret struct {
+ LeaseID string `json:"lease_id"`
+}
+
+type AuditResponseWrapInfo struct {
+ TTL int `json:"ttl"`
+ Token string `json:"token"`
+ CreationTime string `json:"creation_time"`
+ WrappedAccessor string `json:"wrapped_accessor,omitempty"`
+}
+
+// getRemoteAddr safely gets the remote address avoiding a nil pointer
+func getRemoteAddr(req *logical.Request) string {
+ if req != nil && req.Connection != nil {
+ return req.Connection.RemoteAddr
+ }
+ return ""
+}
+
+// parseVaultTokenFromJWT returns a string iff the token was a JWT and we could
+// extract the original token ID from inside
+func parseVaultTokenFromJWT(token string) *string {
+ if strings.Count(token, ".") != 2 {
+ return nil
+ }
+
+ wt, err := jws.ParseJWT([]byte(token))
+ if err != nil || wt == nil {
+ return nil
+ }
+
+ result, _ := wt.Claims().JWTID()
+
+ return &result
+}
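+
+// Example (illustrative): wiring the formatter to a writer; salt setup and
+// error handling are elided.
+//
+//	f := &AuditFormatter{AuditFormatWriter: &JSONFormatWriter{}}
+//	_ = f.FormatRequest(os.Stderr, FormatterConfig{Salt: salter}, auth, req, nil)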
diff --git a/vendor/github.com/hashicorp/vault/audit/format_json.go b/vendor/github.com/hashicorp/vault/audit/format_json.go
new file mode 100644
index 0000000..9e200f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/format_json.go
@@ -0,0 +1,45 @@
+package audit
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+// JSONFormatWriter is an AuditFormatWriter implementation that structures data into
+// a JSON format.
+type JSONFormatWriter struct {
+ Prefix string
+}
+
+func (f *JSONFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error {
+ if req == nil {
+ return fmt.Errorf("request entry was nil, cannot encode")
+ }
+
+ if len(f.Prefix) > 0 {
+ _, err := w.Write([]byte(f.Prefix))
+ if err != nil {
+ return err
+ }
+ }
+
+ enc := json.NewEncoder(w)
+ return enc.Encode(req)
+}
+
+func (f *JSONFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error {
+ if resp == nil {
+ return fmt.Errorf("response entry was nil, cannot encode")
+ }
+
+ if len(f.Prefix) > 0 {
+ _, err := w.Write([]byte(f.Prefix))
+ if err != nil {
+ return err
+ }
+ }
+
+ enc := json.NewEncoder(w)
+ return enc.Encode(resp)
+}
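+
+// Example (illustrative): a prefix such as the syslog "@cee: " cookie is
+// written verbatim before each JSON entry, as the tests exercise.
+//
+//	w := &JSONFormatWriter{Prefix: "@cee: "}
+//	_ = w.WriteRequest(os.Stdout, entry)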
diff --git a/vendor/github.com/hashicorp/vault/audit/format_json_test.go b/vendor/github.com/hashicorp/vault/audit/format_json_test.go
new file mode 100644
index 0000000..21bb647
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/format_json_test.go
@@ -0,0 +1,110 @@
+package audit
+
+import (
+ "bytes"
+ "encoding/json"
+ "strings"
+ "testing"
+ "time"
+
+ "errors"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestFormatJSON_formatRequest(t *testing.T) {
+ cases := map[string]struct {
+ Auth *logical.Auth
+ Req *logical.Request
+ Err error
+ Prefix string
+ Result string
+ }{
+ "auth, request": {
+ &logical.Auth{ClientToken: "foo", Policies: []string{"root"}},
+ &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "/foo",
+ Connection: &logical.Connection{
+ RemoteAddr: "127.0.0.1",
+ },
+ WrapInfo: &logical.RequestWrapInfo{
+ TTL: 60 * time.Second,
+ },
+ Headers: map[string][]string{
+ "foo": []string{"bar"},
+ },
+ },
+ errors.New("this is an error"),
+ "",
+ testFormatJSONReqBasicStr,
+ },
+ "auth, request with prefix": {
+ &logical.Auth{ClientToken: "foo", Policies: []string{"root"}},
+ &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "/foo",
+ Connection: &logical.Connection{
+ RemoteAddr: "127.0.0.1",
+ },
+ WrapInfo: &logical.RequestWrapInfo{
+ TTL: 60 * time.Second,
+ },
+ Headers: map[string][]string{
+ "foo": []string{"bar"},
+ },
+ },
+ errors.New("this is an error"),
+ "@cee: ",
+ testFormatJSONReqBasicStr,
+ },
+ }
+
+ for name, tc := range cases {
+ var buf bytes.Buffer
+ formatter := AuditFormatter{
+ AuditFormatWriter: &JSONFormatWriter{
+ Prefix: tc.Prefix,
+ },
+ }
+ salter, _ := salt.NewSalt(nil, nil)
+ config := FormatterConfig{
+ Salt: salter,
+ }
+ if err := formatter.FormatRequest(&buf, config, tc.Auth, tc.Req, tc.Err); err != nil {
+ t.Fatalf("bad: %s\nerr: %s", name, err)
+ }
+
+ if !strings.HasPrefix(buf.String(), tc.Prefix) {
+ t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, tc.Result, tc.Prefix)
+ }
+
+ var expectedjson = new(AuditRequestEntry)
+ if err := jsonutil.DecodeJSON([]byte(tc.Result), &expectedjson); err != nil {
+ t.Fatalf("bad json: %s", err)
+ }
+
+ var actualjson = new(AuditRequestEntry)
+ if err := jsonutil.DecodeJSON([]byte(buf.String())[len(tc.Prefix):], &actualjson); err != nil {
+ t.Fatalf("bad json: %s", err)
+ }
+
+ expectedjson.Time = actualjson.Time
+
+ expectedBytes, err := json.Marshal(expectedjson)
+ if err != nil {
+ t.Fatalf("unable to marshal json: %s", err)
+ }
+
+ if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(expectedBytes)) {
+ t.Fatalf(
+ "bad: %s\nResult:\n\n'%s'\n\nExpected:\n\n'%s'",
+ name, buf.String(), string(expectedBytes))
+ }
+ }
+}
+
+const testFormatJSONReqBasicStr = `{"time":"2015-08-05T13:45:46Z","type":"request","auth":{"display_name":"","policies":["root"],"metadata":null},"request":{"operation":"update","path":"/foo","data":null,"wrap_ttl":60,"remote_address":"127.0.0.1","headers":{"foo":["bar"]}},"error":"this is an error"}
+`
diff --git a/vendor/github.com/hashicorp/vault/audit/format_jsonx.go b/vendor/github.com/hashicorp/vault/audit/format_jsonx.go
new file mode 100644
index 0000000..cc6cc95
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/format_jsonx.go
@@ -0,0 +1,67 @@
+package audit
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/jefferai/jsonx"
+)
+
+// JSONxFormatWriter is an AuditFormatWriter implementation that structures data into
+// an XML format (JSONx).
+type JSONxFormatWriter struct {
+ Prefix string
+}
+
+func (f *JSONxFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error {
+ if req == nil {
+ return fmt.Errorf("request entry was nil, cannot encode")
+ }
+
+ if len(f.Prefix) > 0 {
+ _, err := w.Write([]byte(f.Prefix))
+ if err != nil {
+ return err
+ }
+ }
+
+ jsonBytes, err := json.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes)
+ if err != nil {
+ return err
+ }
+
+ _, err = w.Write(xmlBytes)
+ return err
+}
+
+func (f *JSONxFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error {
+ if resp == nil {
+ return fmt.Errorf("response entry was nil, cannot encode")
+ }
+
+ if len(f.Prefix) > 0 {
+ _, err := w.Write([]byte(f.Prefix))
+ if err != nil {
+ return err
+ }
+ }
+
+ jsonBytes, err := json.Marshal(resp)
+ if err != nil {
+ return err
+ }
+
+ xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes)
+ if err != nil {
+ return err
+ }
+
+ _, err = w.Write(xmlBytes)
+ return err
+}
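+
+// Example (illustrative): the JSONx writer is a drop-in replacement for the
+// JSON writer, emitting the same entries as XML.
+//
+//	f := &AuditFormatter{AuditFormatWriter: &JSONxFormatWriter{}}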
diff --git a/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go b/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go
new file mode 100644
index 0000000..8d4fe4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go
@@ -0,0 +1,92 @@
+package audit
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+ "time"
+
+ "errors"
+
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestFormatJSONx_formatRequest(t *testing.T) {
+ cases := map[string]struct {
+ Auth *logical.Auth
+ Req *logical.Request
+ Err error
+ Prefix string
+ Result string
+ Expected string
+ }{
+ "auth, request": {
+ &logical.Auth{ClientToken: "foo", Policies: []string{"root"}},
+ &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "/foo",
+ Connection: &logical.Connection{
+ RemoteAddr: "127.0.0.1",
+ },
+ WrapInfo: &logical.RequestWrapInfo{
+ TTL: 60 * time.Second,
+ },
+ Headers: map[string][]string{
+ "foo": []string{"bar"},
+ },
+ },
+ errors.New("this is an error"),
+ "",
+ "",
+ `<json:object name="auth"><json:string name="accessor"></json:string><json:string name="client_token"></json:string><json:string name="display_name"></json:string><json:null name="metadata" /><json:array name="policies"><json:string>root</json:string></json:array></json:object><json:string name="error">this is an error</json:string><json:object name="request"><json:string name="client_token"></json:string><json:string name="client_token_accessor"></json:string><json:null name="data" /><json:object name="headers"><json:array name="foo"><json:string>bar</json:string></json:array></json:object><json:string name="id"></json:string><json:string name="operation">update</json:string><json:string name="path">/foo</json:string><json:string name="remote_address">127.0.0.1</json:string><json:number name="wrap_ttl">60</json:number></json:object><json:string name="type">request</json:string>`,
+ },
+ "auth, request with prefix": {
+ &logical.Auth{ClientToken: "foo", Policies: []string{"root"}},
+ &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "/foo",
+ Connection: &logical.Connection{
+ RemoteAddr: "127.0.0.1",
+ },
+ WrapInfo: &logical.RequestWrapInfo{
+ TTL: 60 * time.Second,
+ },
+ Headers: map[string][]string{
+ "foo": []string{"bar"},
+ },
+ },
+ errors.New("this is an error"),
+ "",
+ "@cee: ",
+ `<json:object name="auth"><json:string name="accessor"></json:string><json:string name="client_token"></json:string><json:string name="display_name"></json:string><json:null name="metadata" /><json:array name="policies"><json:string>root</json:string></json:array></json:object><json:string name="error">this is an error</json:string><json:object name="request"><json:string name="client_token"></json:string><json:string name="client_token_accessor"></json:string><json:null name="data" /><json:object name="headers"><json:array name="foo"><json:string>bar</json:string></json:array></json:object><json:string name="id"></json:string><json:string name="operation">update</json:string><json:string name="path">/foo</json:string><json:string name="remote_address">127.0.0.1</json:string><json:number name="wrap_ttl">60</json:number></json:object><json:string name="type">request</json:string>`,
+ },
+ }
+
+ for name, tc := range cases {
+ var buf bytes.Buffer
+ formatter := AuditFormatter{
+ AuditFormatWriter: &JSONxFormatWriter{
+ Prefix: tc.Prefix,
+ },
+ }
+ salter, _ := salt.NewSalt(nil, nil)
+ config := FormatterConfig{
+ Salt: salter,
+ OmitTime: true,
+ }
+ if err := formatter.FormatRequest(&buf, config, tc.Auth, tc.Req, tc.Err); err != nil {
+ t.Fatalf("bad: %s\nerr: %s", name, err)
+ }
+
+ if !strings.HasPrefix(buf.String(), tc.Prefix) {
+ t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, tc.Result, tc.Prefix)
+ }
+
+ if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(tc.Expected)) {
+ t.Fatalf(
+ "bad: %s\nResult:\n\n'%s'\n\nExpected:\n\n'%s'",
+ name, strings.TrimSpace(buf.String()), string(tc.Expected))
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/audit/format_test.go b/vendor/github.com/hashicorp/vault/audit/format_test.go
new file mode 100644
index 0000000..6a6425b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/format_test.go
@@ -0,0 +1,55 @@
+package audit
+
+import (
+ "io"
+ "io/ioutil"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+)
+
+type noopFormatWriter struct {
+}
+
+func (n *noopFormatWriter) WriteRequest(_ io.Writer, _ *AuditRequestEntry) error {
+ return nil
+}
+
+func (n *noopFormatWriter) WriteResponse(_ io.Writer, _ *AuditResponseEntry) error {
+ return nil
+}
+
+func TestFormatRequestErrors(t *testing.T) {
+ salter, _ := salt.NewSalt(nil, nil)
+ config := FormatterConfig{
+ Salt: salter,
+ }
+ formatter := AuditFormatter{
+ AuditFormatWriter: &noopFormatWriter{},
+ }
+
+ if err := formatter.FormatRequest(ioutil.Discard, config, nil, nil, nil); err == nil {
+ t.Fatal("expected error due to nil request")
+ }
+ if err := formatter.FormatRequest(nil, config, nil, &logical.Request{}, nil); err == nil {
+ t.Fatal("expected error due to nil writer")
+ }
+}
+
+func TestFormatResponseErrors(t *testing.T) {
+ salter, _ := salt.NewSalt(nil, nil)
+ config := FormatterConfig{
+ Salt: salter,
+ }
+ formatter := AuditFormatter{
+ AuditFormatWriter: &noopFormatWriter{},
+ }
+
+ if err := formatter.FormatResponse(ioutil.Discard, config, nil, nil, nil, nil); err == nil {
+ t.Fatal("expected error due to nil request")
+ }
+ if err := formatter.FormatResponse(nil, config, nil, &logical.Request{}, nil, nil); err == nil {
+ t.Fatal("expected error due to nil writer")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/audit/formatter.go b/vendor/github.com/hashicorp/vault/audit/formatter.go
new file mode 100644
index 0000000..318bd1b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/formatter.go
@@ -0,0 +1,27 @@
+package audit
+
+import (
+ "io"
+
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+)
+
+// Formatter is an interface that is responsible for formatting a
+// request/response into some format. Formatters write their output
+// to an io.Writer.
+//
+// It is recommended that you pass data through Hash prior to formatting it.
+type Formatter interface {
+ FormatRequest(io.Writer, FormatterConfig, *logical.Auth, *logical.Request, error) error
+ FormatResponse(io.Writer, FormatterConfig, *logical.Auth, *logical.Request, *logical.Response, error) error
+}
+
+type FormatterConfig struct {
+ Raw bool
+ Salt *salt.Salt
+ HMACAccessor bool
+
+ // This should only ever be used in a testing context
+ OmitTime bool
+}
diff --git a/vendor/github.com/hashicorp/vault/audit/hashstructure.go b/vendor/github.com/hashicorp/vault/audit/hashstructure.go
new file mode 100644
index 0000000..8d0fd7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/hashstructure.go
@@ -0,0 +1,270 @@
+package audit
+
+import (
+ "reflect"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+ "github.com/mitchellh/copystructure"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// HashString hashes the given opaque string and returns it
+func HashString(salter *salt.Salt, data string) string {
+ return salter.GetIdentifiedHMAC(data)
+}
+
+// Hash will hash the given type. This has built-in support for auth,
+// requests, and responses. If it is a type that isn't recognized, then
+// it will be passed through.
+//
+// The structure is modified in-place.
+func Hash(salter *salt.Salt, raw interface{}) error {
+ fn := salter.GetIdentifiedHMAC
+
+ switch s := raw.(type) {
+ case *logical.Auth:
+ if s == nil {
+ return nil
+ }
+ if s.ClientToken != "" {
+ s.ClientToken = fn(s.ClientToken)
+ }
+ if s.Accessor != "" {
+ s.Accessor = fn(s.Accessor)
+ }
+
+ case *logical.Request:
+ if s == nil {
+ return nil
+ }
+ if s.Auth != nil {
+ if err := Hash(salter, s.Auth); err != nil {
+ return err
+ }
+ }
+
+ if s.ClientToken != "" {
+ s.ClientToken = fn(s.ClientToken)
+ }
+
+ if s.ClientTokenAccessor != "" {
+ s.ClientTokenAccessor = fn(s.ClientTokenAccessor)
+ }
+
+ data, err := HashStructure(s.Data, fn)
+ if err != nil {
+ return err
+ }
+
+ s.Data = data.(map[string]interface{})
+
+ case *logical.Response:
+ if s == nil {
+ return nil
+ }
+
+ if s.Auth != nil {
+ if err := Hash(salter, s.Auth); err != nil {
+ return err
+ }
+ }
+
+ if s.WrapInfo != nil {
+ if err := Hash(salter, s.WrapInfo); err != nil {
+ return err
+ }
+ }
+
+ data, err := HashStructure(s.Data, fn)
+ if err != nil {
+ return err
+ }
+
+ s.Data = data.(map[string]interface{})
+
+ case *logical.ResponseWrapInfo:
+ if s == nil {
+ return nil
+ }
+
+ s.Token = fn(s.Token)
+
+ if s.WrappedAccessor != "" {
+ s.WrappedAccessor = fn(s.WrappedAccessor)
+ }
+ }
+
+ return nil
+}
+
+// HashStructure takes an interface and hashes all the values within
+// the structure. Only _values_ are hashed: keys of objects are not.
+//
+// For the HashCallback, see the built-in HashCallbacks below.
+func HashStructure(s interface{}, cb HashCallback) (interface{}, error) {
+ s, err := copystructure.Copy(s)
+ if err != nil {
+ return nil, err
+ }
+
+ walker := &hashWalker{Callback: cb}
+ if err := reflectwalk.Walk(s, walker); err != nil {
+ return nil, err
+ }
+
+ return s, nil
+}
+
+// HashCallback is the callback called for HashStructure to hash
+// a value.
+type HashCallback func(string) string
+
+// hashWalker implements interfaces for the reflectwalk package
+// (github.com/mitchellh/reflectwalk) that can be used to automatically
+// replace primitives with a hashed value.
+type hashWalker struct {
+ // Callback is the function to call with the primitive that is
+ // to be hashed. If there is an error, walking will be halted
+ // immediately and the error returned.
+ Callback HashCallback
+
+ key []string
+ lastValue reflect.Value
+ loc reflectwalk.Location
+ cs []reflect.Value
+ csKey []reflect.Value
+ csData interface{}
+ sliceIndex int
+ unknownKeys []string
+}
+
+func (w *hashWalker) Enter(loc reflectwalk.Location) error {
+ w.loc = loc
+ return nil
+}
+
+func (w *hashWalker) Exit(loc reflectwalk.Location) error {
+ w.loc = reflectwalk.None
+
+ switch loc {
+ case reflectwalk.Map:
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.MapValue:
+ w.key = w.key[:len(w.key)-1]
+ w.csKey = w.csKey[:len(w.csKey)-1]
+ case reflectwalk.Slice:
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.SliceElem:
+ w.csKey = w.csKey[:len(w.csKey)-1]
+ }
+
+ return nil
+}
+
+func (w *hashWalker) Map(m reflect.Value) error {
+ w.cs = append(w.cs, m)
+ return nil
+}
+
+func (w *hashWalker) MapElem(m, k, v reflect.Value) error {
+ w.csData = k
+ w.csKey = append(w.csKey, k)
+ w.key = append(w.key, k.String())
+ w.lastValue = v
+ return nil
+}
+
+func (w *hashWalker) Slice(s reflect.Value) error {
+ w.cs = append(w.cs, s)
+ return nil
+}
+
+func (w *hashWalker) SliceElem(i int, elem reflect.Value) error {
+ w.csKey = append(w.csKey, reflect.ValueOf(i))
+ w.sliceIndex = i
+ return nil
+}
+
+func (w *hashWalker) Primitive(v reflect.Value) error {
+ if w.Callback == nil {
+ return nil
+ }
+
+ // We don't touch map keys
+ if w.loc == reflectwalk.MapKey {
+ return nil
+ }
+
+ setV := v
+
+ // We only care about strings
+ if v.Kind() == reflect.Interface {
+ setV = v
+ v = v.Elem()
+ }
+ if v.Kind() != reflect.String {
+ return nil
+ }
+
+ replaceVal := w.Callback(v.String())
+
+ resultVal := reflect.ValueOf(replaceVal)
+ switch w.loc {
+ case reflectwalk.MapKey:
+ m := w.cs[len(w.cs)-1]
+
+ // Delete the old value
+ var zero reflect.Value
+ m.SetMapIndex(w.csData.(reflect.Value), zero)
+
+ // Set the new key with the existing value
+ m.SetMapIndex(resultVal, w.lastValue)
+
+ // Set the key to be the new key
+ w.csData = resultVal
+ case reflectwalk.MapValue:
+ // If we're in a map, then the only way to set a map value is
+ // to set it directly.
+ m := w.cs[len(w.cs)-1]
+ mk := w.csData.(reflect.Value)
+ m.SetMapIndex(mk, resultVal)
+ default:
+ // Otherwise, we should be addressable
+ setV.Set(resultVal)
+ }
+
+ return nil
+}
+
+func (w *hashWalker) removeCurrent() {
+ // Append the key to the unknown keys
+ w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
+
+ for i := 1; i <= len(w.cs); i++ {
+ c := w.cs[len(w.cs)-i]
+ switch c.Kind() {
+ case reflect.Map:
+ // Zero value so that we delete the map key
+ var val reflect.Value
+
+ // Get the key and delete it
+ k := w.csData.(reflect.Value)
+ c.SetMapIndex(k, val)
+ return
+ }
+ }
+
+ panic("No container found for removeCurrent")
+}
+
+func (w *hashWalker) replaceCurrent(v reflect.Value) {
+ c := w.cs[len(w.cs)-2]
+ switch c.Kind() {
+ case reflect.Map:
+ // Get the key and delete it
+ k := w.csKey[len(w.csKey)-1]
+ c.SetMapIndex(k, v)
+ }
+}
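
A minimal sketch of HashStructure with a custom HashCallback, to make the copy-then-walk behavior above concrete; the callback is a placeholder rather than a real HMAC.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/audit"
)

func main() {
	data := map[string]interface{}{
		"password": "hunter2",
		"tags":     []interface{}{"secret"},
	}

	// Only string values are rewritten; map keys are left untouched.
	out, err := audit.HashStructure(data, func(s string) string {
		return "hashed:" + s // placeholder callback, not a real HMAC
	})
	if err != nil {
		panic(err)
	}

	// HashStructure walks a deep copy, so `data` itself is unchanged and
	// `out` holds the rewritten copy. Hash, by contrast, mutates in place.
	fmt.Printf("%#v\n%#v\n", data, out)
}
```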
diff --git a/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go b/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go
new file mode 100644
index 0000000..5fefa0f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go
@@ -0,0 +1,246 @@
+package audit
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+ "github.com/mitchellh/copystructure"
+)
+
+func TestCopy_auth(t *testing.T) {
+ // Make a non-pointer one so that it can't be modified directly
+ expected := logical.Auth{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 1 * time.Hour,
+ IssueTime: time.Now(),
+ },
+
+ ClientToken: "foo",
+ }
+ auth := expected
+
+ // Copy it
+ dup, err := copystructure.Copy(&auth)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Check equality
+ auth2 := dup.(*logical.Auth)
+ if !reflect.DeepEqual(*auth2, expected) {
+ t.Fatalf("bad:\n\n%#v\n\n%#v", *auth2, expected)
+ }
+}
+
+func TestCopy_request(t *testing.T) {
+ // Make a non-pointer one so that it can't be modified directly
+ expected := logical.Request{
+ Data: map[string]interface{}{
+ "foo": "bar",
+ },
+ WrapInfo: &logical.RequestWrapInfo{
+ TTL: 60 * time.Second,
+ },
+ }
+ arg := expected
+
+ // Copy it
+ dup, err := copystructure.Copy(&arg)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Check equality
+ arg2 := dup.(*logical.Request)
+ if !reflect.DeepEqual(*arg2, expected) {
+ t.Fatalf("bad:\n\n%#v\n\n%#v", *arg2, expected)
+ }
+}
+
+func TestCopy_response(t *testing.T) {
+ // Make a non-pointer one so that it can't be modified directly
+ expected := logical.Response{
+ Data: map[string]interface{}{
+ "foo": "bar",
+ },
+ WrapInfo: &logical.ResponseWrapInfo{
+ TTL: 60,
+ Token: "foo",
+ CreationTime: time.Now(),
+ WrappedAccessor: "abcd1234",
+ },
+ }
+ arg := expected
+
+ // Copy it
+ dup, err := copystructure.Copy(&arg)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Check equality
+ arg2 := dup.(*logical.Response)
+ if !reflect.DeepEqual(*arg2, expected) {
+ t.Fatalf("bad:\n\n%#v\n\n%#v", *arg2, expected)
+ }
+}
+
+func TestHashString(t *testing.T) {
+ inmemStorage := &logical.InmemStorage{}
+ inmemStorage.Put(&logical.StorageEntry{
+ Key: "salt",
+ Value: []byte("foo"),
+ })
+ localSalt, err := salt.NewSalt(inmemStorage, &salt.Config{
+ HMAC: sha256.New,
+ HMACType: "hmac-sha256",
+ })
+ if err != nil {
+ t.Fatalf("Error instantiating salt: %s", err)
+ }
+ out := HashString(localSalt, "foo")
+ if out != "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a" {
+ t.Fatalf("err: HashString output did not match expected")
+ }
+}
+
+func TestHash(t *testing.T) {
+ now := time.Now()
+
+ cases := []struct {
+ Input interface{}
+ Output interface{}
+ }{
+ {
+ &logical.Auth{ClientToken: "foo"},
+ &logical.Auth{ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a"},
+ },
+ {
+ &logical.Request{
+ Data: map[string]interface{}{
+ "foo": "bar",
+ "private_key_type": certutil.PrivateKeyType("rsa"),
+ },
+ },
+ &logical.Request{
+ Data: map[string]interface{}{
+ "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
+ "private_key_type": "hmac-sha256:995230dca56fffd310ff591aa404aab52b2abb41703c787cfa829eceb4595bf1",
+ },
+ },
+ },
+ {
+ &logical.Response{
+ Data: map[string]interface{}{
+ "foo": "bar",
+ },
+ WrapInfo: &logical.ResponseWrapInfo{
+ TTL: 60,
+ Token: "bar",
+ CreationTime: now,
+ WrappedAccessor: "bar",
+ },
+ },
+ &logical.Response{
+ Data: map[string]interface{}{
+ "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
+ },
+ WrapInfo: &logical.ResponseWrapInfo{
+ TTL: 60,
+ Token: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
+ CreationTime: now,
+ WrappedAccessor: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
+ },
+ },
+ },
+ {
+ "foo",
+ "foo",
+ },
+ {
+ &logical.Auth{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 1 * time.Hour,
+ IssueTime: now,
+ },
+
+ ClientToken: "foo",
+ },
+ &logical.Auth{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 1 * time.Hour,
+ IssueTime: now,
+ },
+
+ ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a",
+ },
+ },
+ }
+
+ inmemStorage := &logical.InmemStorage{}
+ inmemStorage.Put(&logical.StorageEntry{
+ Key: "salt",
+ Value: []byte("foo"),
+ })
+ localSalt, err := salt.NewSalt(inmemStorage, &salt.Config{
+ HMAC: sha256.New,
+ HMACType: "hmac-sha256",
+ })
+ if err != nil {
+ t.Fatalf("Error instantiating salt: %s", err)
+ }
+ for _, tc := range cases {
+ input := fmt.Sprintf("%#v", tc.Input)
+ if err := Hash(localSalt, tc.Input); err != nil {
+ t.Fatalf("err: %s\n\n%s", err, input)
+ }
+ if !reflect.DeepEqual(tc.Input, tc.Output) {
+ t.Fatalf("bad:\nInput:\n%s\nTest case input:\n%#v\nTest case output\n%#v", input, tc.Input, tc.Output)
+ }
+ }
+}
+
+func TestHashWalker(t *testing.T) {
+ replaceText := "foo"
+
+ cases := []struct {
+ Input interface{}
+ Output interface{}
+ }{
+ {
+ map[string]interface{}{
+ "hello": "foo",
+ },
+ map[string]interface{}{
+ "hello": replaceText,
+ },
+ },
+
+ {
+ map[string]interface{}{
+ "hello": []interface{}{"world"},
+ },
+ map[string]interface{}{
+ "hello": []interface{}{replaceText},
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ output, err := HashStructure(tc.Input, func(string) string {
+ return replaceText
+ })
+ if err != nil {
+ t.Fatalf("err: %s\n\n%#v", err, tc.Input)
+ }
+ if !reflect.DeepEqual(output, tc.Output) {
+ t.Fatalf("bad:\n\n%#v\n\n%#v", tc.Input, output)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go
new file mode 100644
index 0000000..cc2cfe5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go
@@ -0,0 +1,191 @@
+package file
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "sync"
+
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/logical"
+)
+
+func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
+ if conf.Salt == nil {
+ return nil, fmt.Errorf("nil salt")
+ }
+
+ path, ok := conf.Config["file_path"]
+ if !ok {
+ path, ok = conf.Config["path"]
+ if !ok {
+ return nil, fmt.Errorf("file_path is required")
+ }
+ }
+
+ format, ok := conf.Config["format"]
+ if !ok {
+ format = "json"
+ }
+ switch format {
+ case "json", "jsonx":
+ default:
+ return nil, fmt.Errorf("unknown format type %s", format)
+ }
+
+ // Check if hashing of accessor is disabled
+ hmacAccessor := true
+ if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
+ value, err := strconv.ParseBool(hmacAccessorRaw)
+ if err != nil {
+ return nil, err
+ }
+ hmacAccessor = value
+ }
+
+ // Check if raw logging is enabled
+ logRaw := false
+ if raw, ok := conf.Config["log_raw"]; ok {
+ b, err := strconv.ParseBool(raw)
+ if err != nil {
+ return nil, err
+ }
+ logRaw = b
+ }
+
+ // Check if mode is provided
+ mode := os.FileMode(0600)
+ if modeRaw, ok := conf.Config["mode"]; ok {
+ m, err := strconv.ParseUint(modeRaw, 8, 32)
+ if err != nil {
+ return nil, err
+ }
+ mode = os.FileMode(m)
+ }
+
+ b := &Backend{
+ path: path,
+ mode: mode,
+ formatConfig: audit.FormatterConfig{
+ Raw: logRaw,
+ Salt: conf.Salt,
+ HMACAccessor: hmacAccessor,
+ },
+ }
+
+ switch format {
+ case "json":
+ b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
+ Prefix: conf.Config["prefix"],
+ }
+ case "jsonx":
+ b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{
+ Prefix: conf.Config["prefix"],
+ }
+ }
+
+ // Ensure that the file can be successfully opened for writing;
+ // otherwise it will be too late to catch later without problems
+ // (ref: https://github.com/hashicorp/vault/issues/550)
+ if err := b.open(); err != nil {
+ return nil, fmt.Errorf("sanity check failed; unable to open %s for writing: %v", path, err)
+ }
+
+ return b, nil
+}
+
+// Backend is the audit backend for the file-based audit store.
+//
+// NOTE: This audit backend is currently very simple: it appends to a file.
+// It doesn't do anything more at the moment to assist with rotation
+// or resetting the write cursor; this should be done in the future.
+type Backend struct {
+ path string
+
+ formatter audit.AuditFormatter
+ formatConfig audit.FormatterConfig
+
+ fileLock sync.RWMutex
+ f *os.File
+ mode os.FileMode
+}
+
+func (b *Backend) GetHash(data string) string {
+ return audit.HashString(b.formatConfig.Salt, data)
+}
+
+func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error {
+ b.fileLock.Lock()
+ defer b.fileLock.Unlock()
+
+ if err := b.open(); err != nil {
+ return err
+ }
+
+ return b.formatter.FormatRequest(b.f, b.formatConfig, auth, req, outerErr)
+}
+
+func (b *Backend) LogResponse(
+ auth *logical.Auth,
+ req *logical.Request,
+ resp *logical.Response,
+ err error) error {
+
+ b.fileLock.Lock()
+ defer b.fileLock.Unlock()
+
+ if err := b.open(); err != nil {
+ return err
+ }
+
+ return b.formatter.FormatResponse(b.f, b.formatConfig, auth, req, resp, err)
+}
+
+// The file lock must be held before calling this
+func (b *Backend) open() error {
+ if b.f != nil {
+ return nil
+ }
+ if err := os.MkdirAll(filepath.Dir(b.path), b.mode); err != nil {
+ return err
+ }
+
+ var err error
+ b.f, err = os.OpenFile(b.path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, b.mode)
+ if err != nil {
+ return err
+ }
+
+ // Change the file mode in case the log file already existed. We special
+ // case /dev/null since we can't chmod it
+ switch b.path {
+ case "/dev/null":
+ default:
+ err = os.Chmod(b.path, b.mode)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (b *Backend) Reload() error {
+ b.fileLock.Lock()
+ defer b.fileLock.Unlock()
+
+ if b.f == nil {
+ return b.open()
+ }
+
+ err := b.f.Close()
+ // Set to nil here so that even if we error out, on the next access open()
+ // will be tried
+ b.f = nil
+ if err != nil {
+ return err
+ }
+
+ return b.open()
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go
new file mode 100644
index 0000000..0a1a8c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go
@@ -0,0 +1,85 @@
+package file
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "testing"
+
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/salt"
+)
+
+func TestAuditFile_fileModeNew(t *testing.T) {
+ salter, _ := salt.NewSalt(nil, nil)
+
+	modeStr := "0777"
+	mode, err := strconv.ParseUint(modeStr, 8, 32)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	path, err := ioutil.TempDir("", "vault-test_audit_file-file_mode_new")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(path)
+
+ file := filepath.Join(path, "auditTest.txt")
+
+ config := map[string]string{
+ "path": file,
+ "mode": modeStr,
+ }
+
+ _, err = Factory(&audit.BackendConfig{
+ Salt: salter,
+ Config: config,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ info, err := os.Stat(file)
+ if err != nil {
+ t.Fatalf("Cannot retrieve file mode from `Stat`")
+ }
+ if info.Mode() != os.FileMode(mode) {
+ t.Fatalf("File mode does not match.")
+ }
+}
+
+func TestAuditFile_fileModeExisting(t *testing.T) {
+ salter, _ := salt.NewSalt(nil, nil)
+
+ f, err := ioutil.TempFile("", "test")
+ if err != nil {
+ t.Fatalf("Failure to create test file.")
+ }
+ defer os.Remove(f.Name())
+
+ err = os.Chmod(f.Name(), 0777)
+ if err != nil {
+ t.Fatalf("Failure to chmod temp file for testing.")
+ }
+
+ err = f.Close()
+ if err != nil {
+ t.Fatalf("Failure to close temp file for test.")
+ }
+
+ config := map[string]string{
+ "path": f.Name(),
+ }
+
+ _, err = Factory(&audit.BackendConfig{
+ Salt: salter,
+ Config: config,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ info, err := os.Stat(f.Name())
+ if err != nil {
+ t.Fatalf("cannot retrieve file mode from `Stat`")
+ }
+ if info.Mode() != os.FileMode(0600) {
+ t.Fatalf("File mode does not match.")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go b/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go
new file mode 100644
index 0000000..91e701e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go
@@ -0,0 +1,200 @@
+package socket
+
+import (
+ "bytes"
+ "fmt"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+
+ multierror "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
+ if conf.Salt == nil {
+ return nil, fmt.Errorf("nil salt passed in")
+ }
+
+ address, ok := conf.Config["address"]
+ if !ok {
+ return nil, fmt.Errorf("address is required")
+ }
+
+ socketType, ok := conf.Config["socket_type"]
+ if !ok {
+ socketType = "tcp"
+ }
+
+ writeDeadline, ok := conf.Config["write_timeout"]
+ if !ok {
+ writeDeadline = "2s"
+ }
+ writeDuration, err := parseutil.ParseDurationSecond(writeDeadline)
+ if err != nil {
+ return nil, err
+ }
+
+ format, ok := conf.Config["format"]
+ if !ok {
+ format = "json"
+ }
+ switch format {
+ case "json", "jsonx":
+ default:
+ return nil, fmt.Errorf("unknown format type %s", format)
+ }
+
+ // Check if hashing of accessor is disabled
+ hmacAccessor := true
+ if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
+ value, err := strconv.ParseBool(hmacAccessorRaw)
+ if err != nil {
+ return nil, err
+ }
+ hmacAccessor = value
+ }
+
+ // Check if raw logging is enabled
+ logRaw := false
+ if raw, ok := conf.Config["log_raw"]; ok {
+ b, err := strconv.ParseBool(raw)
+ if err != nil {
+ return nil, err
+ }
+ logRaw = b
+ }
+
+ conn, err := net.Dial(socketType, address)
+ if err != nil {
+ return nil, err
+ }
+
+ b := &Backend{
+ connection: conn,
+ formatConfig: audit.FormatterConfig{
+ Raw: logRaw,
+ Salt: conf.Salt,
+ HMACAccessor: hmacAccessor,
+ },
+ writeDuration: writeDuration,
+ address: address,
+ socketType: socketType,
+ }
+
+ switch format {
+ case "json":
+ b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
+ Prefix: conf.Config["prefix"],
+ }
+ case "jsonx":
+ b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{
+ Prefix: conf.Config["prefix"],
+ }
+ }
+
+ return b, nil
+}
+
+// Backend is the audit backend for the socket audit transport.
+type Backend struct {
+ connection net.Conn
+
+ formatter audit.AuditFormatter
+ formatConfig audit.FormatterConfig
+
+ writeDuration time.Duration
+ address string
+ socketType string
+
+ sync.Mutex
+}
+
+func (b *Backend) GetHash(data string) string {
+ return audit.HashString(b.formatConfig.Salt, data)
+}
+
+func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error {
+ var buf bytes.Buffer
+ if err := b.formatter.FormatRequest(&buf, b.formatConfig, auth, req, outerErr); err != nil {
+ return err
+ }
+
+ b.Lock()
+ defer b.Unlock()
+
+ err := b.write(buf.Bytes())
+ if err != nil {
+ rErr := b.reconnect()
+ if rErr != nil {
+ err = multierror.Append(err, rErr)
+ } else {
+ // Try once more after reconnecting
+ err = b.write(buf.Bytes())
+ }
+ }
+
+ return err
+}
+
+func (b *Backend) LogResponse(auth *logical.Auth, req *logical.Request,
+ resp *logical.Response, outerErr error) error {
+ var buf bytes.Buffer
+ if err := b.formatter.FormatResponse(&buf, b.formatConfig, auth, req, resp, outerErr); err != nil {
+ return err
+ }
+
+ b.Lock()
+ defer b.Unlock()
+
+ err := b.write(buf.Bytes())
+ if err != nil {
+ rErr := b.reconnect()
+ if rErr != nil {
+ err = multierror.Append(err, rErr)
+ } else {
+ // Try once more after reconnecting
+ err = b.write(buf.Bytes())
+ }
+ }
+
+ return err
+}
+
+func (b *Backend) write(buf []byte) error {
+ err := b.connection.SetWriteDeadline(time.Now().Add(b.writeDuration))
+ if err != nil {
+ return err
+ }
+
+ _, err = b.connection.Write(buf)
+ if err != nil {
+ return err
+ }
+
+ return err
+}
+
+func (b *Backend) reconnect() error {
+ conn, err := net.Dial(b.socketType, b.address)
+ if err != nil {
+ return err
+ }
+
+ b.connection.Close()
+ b.connection = conn
+
+ return nil
+}
+
+func (b *Backend) Reload() error {
+	b.Lock()
+	defer b.Unlock()
+
+	return b.reconnect()
+}
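
An analogous sketch for the socket transport; the address and timeout values are illustrative. Note that Factory dials eagerly, and a failed write triggers one reconnect followed by a single retry.

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/audit"
	"github.com/hashicorp/vault/builtin/audit/socket"
	"github.com/hashicorp/vault/helper/salt"
)

func main() {
	salter, err := salt.NewSalt(nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	// The listener must already be up: Factory dials the socket immediately.
	backend, err := socket.Factory(&audit.BackendConfig{
		Salt: salter,
		Config: map[string]string{
			"address":       "127.0.0.1:9090", // illustrative listener
			"socket_type":   "tcp",            // default "tcp"
			"write_timeout": "2s",             // default "2s"
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = backend
}
```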
diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go b/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go
new file mode 100644
index 0000000..4b1912f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go
@@ -0,0 +1,125 @@
+package syslog
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+
+ "github.com/hashicorp/go-syslog"
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/logical"
+)
+
+func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
+ if conf.Salt == nil {
+		return nil, fmt.Errorf("nil salt passed in")
+ }
+
+ // Get facility or default to AUTH
+ facility, ok := conf.Config["facility"]
+ if !ok {
+ facility = "AUTH"
+ }
+
+ // Get tag or default to 'vault'
+ tag, ok := conf.Config["tag"]
+ if !ok {
+ tag = "vault"
+ }
+
+ format, ok := conf.Config["format"]
+ if !ok {
+ format = "json"
+ }
+ switch format {
+ case "json", "jsonx":
+ default:
+ return nil, fmt.Errorf("unknown format type %s", format)
+ }
+
+ // Check if hashing of accessor is disabled
+ hmacAccessor := true
+ if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
+ value, err := strconv.ParseBool(hmacAccessorRaw)
+ if err != nil {
+ return nil, err
+ }
+ hmacAccessor = value
+ }
+
+ // Check if raw logging is enabled
+ logRaw := false
+ if raw, ok := conf.Config["log_raw"]; ok {
+ b, err := strconv.ParseBool(raw)
+ if err != nil {
+ return nil, err
+ }
+ logRaw = b
+ }
+
+ // Get the logger
+ logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, facility, tag)
+ if err != nil {
+ return nil, err
+ }
+
+ b := &Backend{
+ logger: logger,
+ formatConfig: audit.FormatterConfig{
+ Raw: logRaw,
+ Salt: conf.Salt,
+ HMACAccessor: hmacAccessor,
+ },
+ }
+
+ switch format {
+ case "json":
+ b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
+ Prefix: conf.Config["prefix"],
+ }
+ case "jsonx":
+ b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{
+ Prefix: conf.Config["prefix"],
+ }
+ }
+
+ return b, nil
+}
+
+// Backend is the audit backend for the syslog-based audit store.
+type Backend struct {
+ logger gsyslog.Syslogger
+
+ formatter audit.AuditFormatter
+ formatConfig audit.FormatterConfig
+}
+
+func (b *Backend) GetHash(data string) string {
+ return audit.HashString(b.formatConfig.Salt, data)
+}
+
+func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error {
+ var buf bytes.Buffer
+ if err := b.formatter.FormatRequest(&buf, b.formatConfig, auth, req, outerErr); err != nil {
+ return err
+ }
+
+ // Write out to syslog
+ _, err := b.logger.Write(buf.Bytes())
+ return err
+}
+
+func (b *Backend) LogResponse(auth *logical.Auth, req *logical.Request, resp *logical.Response, err error) error {
+ var buf bytes.Buffer
+ if err := b.formatter.FormatResponse(&buf, b.formatConfig, auth, req, resp, err); err != nil {
+ return err
+ }
+
+ // Write out to syslog
+ _, err = b.logger.Write(buf.Bytes())
+ return err
+}
+
+func (b *Backend) Reload() error {
+ return nil
+}
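
A small sketch of the go-syslog call this Factory builds on; the facility and tag mirror the defaults parsed above, and the payload is illustrative.

```go
package main

import (
	"log"

	gsyslog "github.com/hashicorp/go-syslog"
)

func main() {
	// The same call the Factory above makes: priority, facility, tag.
	logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, "AUTH", "vault")
	if err != nil {
		log.Fatal(err)
	}

	// The audit backend hands fully formatted entries to Write verbatim.
	if _, err := logger.Write([]byte("illustrative audit entry\n")); err != nil {
		log.Fatal(err)
	}
}
```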
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go
new file mode 100644
index 0000000..76d9a6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go
@@ -0,0 +1,233 @@
+package appId
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ b, err := Backend(conf)
+ if err != nil {
+ return nil, err
+ }
+ return b.Setup(conf)
+}
+
+func Backend(conf *logical.BackendConfig) (*framework.Backend, error) {
+ var b backend
+ b.MapAppId = &framework.PolicyMap{
+ PathMap: framework.PathMap{
+ Name: "app-id",
+ Schema: map[string]*framework.FieldSchema{
+ "display_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "A name to map to this app ID for logs.",
+ },
+
+ "value": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Policies for the app ID.",
+ },
+ },
+ },
+ DefaultKey: "default",
+ }
+
+ b.MapUserId = &framework.PathMap{
+ Name: "user-id",
+ Schema: map[string]*framework.FieldSchema{
+ "cidr_block": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "If not blank, restricts auth by this CIDR block",
+ },
+
+ "value": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "App IDs that this user associates with.",
+ },
+ },
+ }
+
+ b.Backend = &framework.Backend{
+ Help: backendHelp,
+
+ PathsSpecial: &logical.Paths{
+ Unauthenticated: []string{
+ "login",
+ "login/*",
+ },
+ },
+
+ Paths: framework.PathAppend([]*framework.Path{
+ pathLogin(&b),
+ pathLoginWithAppIDPath(&b),
+ },
+ b.MapAppId.Paths(),
+ b.MapUserId.Paths(),
+ ),
+
+ AuthRenew: b.pathLoginRenew,
+
+ Init: b.initialize,
+ }
+
+ b.view = conf.StorageView
+
+ return b.Backend, nil
+}
+
+type backend struct {
+ *framework.Backend
+
+ Salt *salt.Salt
+ view logical.Storage
+ MapAppId *framework.PolicyMap
+ MapUserId *framework.PathMap
+}
+
+func (b *backend) initialize() error {
+ salt, err := salt.NewSalt(b.view, &salt.Config{
+ HashFunc: salt.SHA1Hash,
+ })
+ if err != nil {
+ return err
+ }
+ b.Salt = salt
+
+ b.MapAppId.Salt = salt
+ b.MapUserId.Salt = salt
+
+ // Since the salt is new in 0.2, we need to handle this by migrating
+ // any existing keys to use the salt. We can deprecate this eventually,
+ // but for now we want a smooth upgrade experience by automatically
+ // upgrading to use salting.
+ if salt.DidGenerate() {
+ if err := b.upgradeToSalted(b.view); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// upgradeToSalted is used to upgrade the non-salted keys prior to
+// Vault 0.2 to be salted. This is done on mount time and is only
+// done once. It can be deprecated eventually, but should be around
+// long enough for all 0.1.x users to upgrade.
+func (b *backend) upgradeToSalted(view logical.Storage) error {
+ // Create a copy of MapAppId that does not use a Salt
+ nonSaltedAppId := new(framework.PathMap)
+ *nonSaltedAppId = b.MapAppId.PathMap
+ nonSaltedAppId.Salt = nil
+
+ // Get the list of app-ids
+ keys, err := b.MapAppId.List(view, "")
+ if err != nil {
+ return fmt.Errorf("failed to list app-ids: %v", err)
+ }
+
+ // Upgrade all the existing keys
+ for _, key := range keys {
+ val, err := nonSaltedAppId.Get(view, key)
+ if err != nil {
+ return fmt.Errorf("failed to read app-id: %v", err)
+ }
+
+ if err := b.MapAppId.Put(view, key, val); err != nil {
+ return fmt.Errorf("failed to write app-id: %v", err)
+ }
+
+ if err := nonSaltedAppId.Delete(view, key); err != nil {
+ return fmt.Errorf("failed to delete app-id: %v", err)
+ }
+ }
+
+ // Create a copy of MapUserId that does not use a Salt
+ nonSaltedUserId := new(framework.PathMap)
+ *nonSaltedUserId = *b.MapUserId
+ nonSaltedUserId.Salt = nil
+
+ // Get the list of user-ids
+ keys, err = b.MapUserId.List(view, "")
+ if err != nil {
+ return fmt.Errorf("failed to list user-ids: %v", err)
+ }
+
+ // Upgrade all the existing keys
+ for _, key := range keys {
+ val, err := nonSaltedUserId.Get(view, key)
+ if err != nil {
+ return fmt.Errorf("failed to read user-id: %v", err)
+ }
+
+ if err := b.MapUserId.Put(view, key, val); err != nil {
+ return fmt.Errorf("failed to write user-id: %v", err)
+ }
+
+ if err := nonSaltedUserId.Delete(view, key); err != nil {
+ return fmt.Errorf("failed to delete user-id: %v", err)
+ }
+ }
+ return nil
+}
+
+const backendHelp = `
+The App ID credential provider is used to perform authentication from
+within applications or machines by pairing together two hard-to-guess
+unique pieces of information: a unique app ID, and a unique user ID.
+
+The goal of this credential provider is to allow elastic users
+(dynamic machines, containers, etc.) to authenticate with Vault without
+having to store passwords outside of Vault. It is a single method of
+solving the chicken-and-egg problem of setting up Vault access on a machine.
+With this provider, nobody except the machine itself has access to both
+pieces of information necessary to authenticate. For example:
+configuration management will have the app IDs, but the machine itself
+will detect its user ID based on some unique machine property such as a
+MAC address (or a hash of it with some salt).
+
+An example real-world process for using this provider:
+
+ 1. Create unique app IDs (UUIDs work well) and map them to policies.
+ (Path: map/app-id/)
+
+ 2. Store the app IDs within configuration management systems.
+
+  3. An out-of-band process run by security operators maps unique user IDs
+ to these app IDs. Example: when an instance is launched, a cloud-init
+ system tells security operators a unique ID for this machine. This
+ process can be scripted, but the key is that it is out-of-band and
+ out of reach of configuration management.
+ (Path: map/user-id/)
+
+ 4. A new server is provisioned. Configuration management configures the
+ app ID, the server itself detects its user ID. With both of these
+ pieces of information, Vault can be accessed according to the policy
+ set by the app ID.
+
+More details on this process follow:
+
+The app ID is a unique ID that maps to a set of policies. This ID is
+generated by an operator and configured into the backend. The ID itself
+is usually a UUID, but any hard-to-guess unique value can be used.
+
+After creating app IDs, an operator authorizes a fixed set of user IDs
+with each app ID. When a valid {app ID, user ID} tuple is given to the
+"login" path, then the user is authenticated with the configured app
+ID policies.
+
+The user ID can be any value (just like the app ID); however, it is
+generally a value unique to a machine, such as a MAC address or instance ID,
+or a value hashed from these unique values.
+
+It is possible to authorize multiple app IDs with each
+user ID by writing them as comma-separated values to the map/user-id/
+path.
+
+It is also possible to renew the auth tokens with the 'vault token-renew <token>'
+command. Before the token is renewed, the validity of the app ID, the user ID,
+and the associated policies is checked again.
+`
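
The pairing flow described in the help text, sketched with the official API client; the mount point, IDs, and policy name are hypothetical.

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// 1. An operator maps an app ID to policies (hypothetical values).
	_, err = client.Logical().Write("auth/app-id/map/app-id/my-app-uuid",
		map[string]interface{}{
			"value":        "my-policy",
			"display_name": "my-app",
		})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Out-of-band, an operator maps a machine-derived user ID to the app ID.
	_, err = client.Logical().Write("auth/app-id/map/user-id/my-user-uuid",
		map[string]interface{}{"value": "my-app-uuid"})
	if err != nil {
		log.Fatal(err)
	}

	// 3. The machine itself logs in with both halves.
	secret, err := client.Logical().Write("auth/app-id/login",
		map[string]interface{}{
			"app_id":  "my-app-uuid",
			"user_id": "my-user-uuid",
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("client token:", secret.Auth.ClientToken)
}
```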
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go
new file mode 100644
index 0000000..2960e40
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go
@@ -0,0 +1,263 @@
+package appId
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+)
+
+func TestBackend_basic(t *testing.T) {
+ logicaltest.Test(t, logicaltest.TestCase{
+ Factory: Factory,
+ Steps: []logicaltest.TestStep{
+ testAccStepMapAppId(t),
+ testAccStepMapUserId(t),
+ testAccLogin(t, ""),
+ testAccLoginAppIDInPath(t, ""),
+ testAccLoginInvalid(t),
+ testAccStepDeleteUserId(t),
+ testAccLoginDeleted(t),
+ },
+ })
+}
+
+func TestBackend_cidr(t *testing.T) {
+ logicaltest.Test(t, logicaltest.TestCase{
+ Factory: Factory,
+ Steps: []logicaltest.TestStep{
+ testAccStepMapAppIdDisplayName(t),
+ testAccStepMapUserIdCidr(t, "192.168.1.0/16"),
+ testAccLoginCidr(t, "192.168.1.5", false),
+ testAccLoginCidr(t, "10.0.1.5", true),
+ testAccLoginCidr(t, "", true),
+ },
+ })
+}
+
+func TestBackend_displayName(t *testing.T) {
+ logicaltest.Test(t, logicaltest.TestCase{
+ Factory: Factory,
+ Steps: []logicaltest.TestStep{
+ testAccStepMapAppIdDisplayName(t),
+ testAccStepMapUserId(t),
+ testAccLogin(t, "tubbin"),
+ testAccLoginAppIDInPath(t, "tubbin"),
+ testAccLoginInvalid(t),
+ testAccStepDeleteUserId(t),
+ testAccLoginDeleted(t),
+ },
+ })
+}
+
+// Verify that we are able to update from non-salted (<0.2) to
+// using a Salt for the paths
+func TestBackend_upgradeToSalted(t *testing.T) {
+ inm := new(logical.InmemStorage)
+
+ // Create some fake keys
+ se, _ := logical.StorageEntryJSON("struct/map/app-id/foo",
+ map[string]string{"value": "test"})
+ inm.Put(se)
+ se, _ = logical.StorageEntryJSON("struct/map/user-id/bar",
+ map[string]string{"value": "foo"})
+ inm.Put(se)
+
+ // Initialize the backend, this should do the automatic upgrade
+ conf := &logical.BackendConfig{
+ StorageView: inm,
+ }
+ backend, err := Factory(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ err = backend.Initialize()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check the keys have been upgraded
+ out, err := inm.Get("struct/map/app-id/foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("unexpected key")
+ }
+ out, err = inm.Get("struct/map/user-id/bar")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("unexpected key")
+ }
+
+ // Backend should still be able to resolve
+ req := logical.TestRequest(t, logical.ReadOperation, "map/app-id/foo")
+ req.Storage = inm
+ resp, err := backend.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["value"] != "test" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "map/user-id/bar")
+ req.Storage = inm
+ resp, err = backend.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["value"] != "foo" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func testAccStepMapAppId(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "map/app-id/foo",
+ Data: map[string]interface{}{
+ "value": "foo,bar",
+ },
+ }
+}
+
+func testAccStepMapAppIdDisplayName(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "map/app-id/foo",
+ Data: map[string]interface{}{
+ "display_name": "tubbin",
+ "value": "foo,bar",
+ },
+ }
+}
+
+func testAccStepMapUserId(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "map/user-id/42",
+ Data: map[string]interface{}{
+ "value": "foo",
+ },
+ }
+}
+
+func testAccStepDeleteUserId(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "map/user-id/42",
+ }
+}
+
+func testAccStepMapUserIdCidr(t *testing.T, cidr string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "map/user-id/42",
+ Data: map[string]interface{}{
+ "value": "foo",
+ "cidr_block": cidr,
+ },
+ }
+}
+
+func testAccLogin(t *testing.T, display string) logicaltest.TestStep {
+ checkTTL := func(resp *logical.Response) error {
+ if resp.Auth.LeaseOptions.TTL.String() != "768h0m0s" {
+ return fmt.Errorf("invalid TTL")
+ }
+ return nil
+ }
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Data: map[string]interface{}{
+ "app_id": "foo",
+ "user_id": "42",
+ },
+ Unauthenticated: true,
+
+ Check: logicaltest.TestCheckMulti(
+ logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}),
+ logicaltest.TestCheckAuthDisplayName(display),
+ checkTTL,
+ ),
+ }
+}
+
+func testAccLoginAppIDInPath(t *testing.T, display string) logicaltest.TestStep {
+ checkTTL := func(resp *logical.Response) error {
+ if resp.Auth.LeaseOptions.TTL.String() != "768h0m0s" {
+ return fmt.Errorf("invalid TTL")
+ }
+ return nil
+ }
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login/foo",
+ Data: map[string]interface{}{
+ "user_id": "42",
+ },
+ Unauthenticated: true,
+
+ Check: logicaltest.TestCheckMulti(
+ logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}),
+ logicaltest.TestCheckAuthDisplayName(display),
+ checkTTL,
+ ),
+ }
+}
+
+func testAccLoginCidr(t *testing.T, ip string, err bool) logicaltest.TestStep {
+ check := logicaltest.TestCheckError()
+ if !err {
+ check = logicaltest.TestCheckAuth([]string{"bar", "default", "foo"})
+ }
+
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Data: map[string]interface{}{
+ "app_id": "foo",
+ "user_id": "42",
+ },
+ ErrorOk: err,
+ Unauthenticated: true,
+ RemoteAddr: ip,
+
+ Check: check,
+ }
+}
+
+func testAccLoginInvalid(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Data: map[string]interface{}{
+ "app_id": "foo",
+ "user_id": "48",
+ },
+ ErrorOk: true,
+ Unauthenticated: true,
+
+ Check: logicaltest.TestCheckError(),
+ }
+}
+
+func testAccLoginDeleted(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Data: map[string]interface{}{
+ "app_id": "foo",
+ "user_id": "42",
+ },
+ ErrorOk: true,
+ Unauthenticated: true,
+
+ Check: logicaltest.TestCheckError(),
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/path_login.go
new file mode 100644
index 0000000..1fba37a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/path_login.go
@@ -0,0 +1,210 @@
+package appId
+
+import (
+ "crypto/sha1"
+ "crypto/subtle"
+ "encoding/hex"
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathLoginWithAppIDPath(b *backend) *framework.Path {
+ return &framework.Path{
+		Pattern: "login/(?P<app_id>.+)",
+ Fields: map[string]*framework.FieldSchema{
+ "app_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The unique app ID",
+ },
+
+ "user_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The unique user ID",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathLogin,
+ },
+
+ HelpSynopsis: pathLoginSyn,
+ HelpDescription: pathLoginDesc,
+ }
+}
+
+func pathLogin(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "login$",
+ Fields: map[string]*framework.FieldSchema{
+ "app_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The unique app ID",
+ },
+
+ "user_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The unique user ID",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathLogin,
+ },
+
+ HelpSynopsis: pathLoginSyn,
+ HelpDescription: pathLoginDesc,
+ }
+}
+
+func (b *backend) pathLogin(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ appId := data.Get("app_id").(string)
+ userId := data.Get("user_id").(string)
+
+ var displayName string
+ if dispName, resp, err := b.verifyCredentials(req, appId, userId); err != nil {
+ return nil, err
+ } else if resp != nil {
+ return resp, nil
+ } else {
+ displayName = dispName
+ }
+
+ // Get the policies associated with the app
+ policies, err := b.MapAppId.Policies(req.Storage, appId)
+ if err != nil {
+ return nil, err
+ }
+
+ // Store hashes of the app ID and user ID for the metadata
+ appIdHash := sha1.Sum([]byte(appId))
+ userIdHash := sha1.Sum([]byte(userId))
+ metadata := map[string]string{
+ "app-id": "sha1:" + hex.EncodeToString(appIdHash[:]),
+ "user-id": "sha1:" + hex.EncodeToString(userIdHash[:]),
+ }
+
+ return &logical.Response{
+ Auth: &logical.Auth{
+ InternalData: map[string]interface{}{
+ "app-id": appId,
+ "user-id": userId,
+ },
+ DisplayName: displayName,
+ Policies: policies,
+ Metadata: metadata,
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ },
+ },
+ }, nil
+}
+
+func (b *backend) pathLoginRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ appId := req.Auth.InternalData["app-id"].(string)
+ userId := req.Auth.InternalData["user-id"].(string)
+
+ // Skipping CIDR verification to enable renewal from machines other than
+ // the ones encompassed by CIDR block.
+ if _, resp, err := b.verifyCredentials(req, appId, userId); err != nil {
+ return nil, err
+ } else if resp != nil {
+ return resp, nil
+ }
+
+ // Get the policies associated with the app
+ mapPolicies, err := b.MapAppId.Policies(req.Storage, appId)
+ if err != nil {
+ return nil, err
+ }
+ if !policyutil.EquivalentPolicies(mapPolicies, req.Auth.Policies) {
+ return nil, fmt.Errorf("policies do not match")
+ }
+
+ return framework.LeaseExtend(0, 0, b.System())(req, d)
+}
+
+func (b *backend) verifyCredentials(req *logical.Request, appId, userId string) (string, *logical.Response, error) {
+ // Ensure both appId and userId are provided
+ if appId == "" || userId == "" {
+ return "", logical.ErrorResponse("missing 'app_id' or 'user_id'"), nil
+ }
+
+ // Look up the apps that this user is allowed to access
+ appsMap, err := b.MapUserId.Get(req.Storage, userId)
+ if err != nil {
+ return "", nil, err
+ }
+ if appsMap == nil {
+ return "", logical.ErrorResponse("invalid user ID or app ID"), nil
+ }
+
+ // If there is a CIDR block restriction, check that
+ if raw, ok := appsMap["cidr_block"]; ok {
+ _, cidr, err := net.ParseCIDR(raw.(string))
+ if err != nil {
+ return "", nil, fmt.Errorf("invalid restriction cidr: %s", err)
+ }
+
+ var addr string
+ if req.Connection != nil {
+ addr = req.Connection.RemoteAddr
+ }
+ if addr == "" || !cidr.Contains(net.ParseIP(addr)) {
+ return "", logical.ErrorResponse("unauthorized source address"), nil
+ }
+ }
+
+ appsRaw, ok := appsMap["value"]
+ if !ok {
+ appsRaw = ""
+ }
+
+ apps, ok := appsRaw.(string)
+ if !ok {
+ return "", nil, fmt.Errorf("internal error: mapping is not a string")
+ }
+
+ // Verify that the app is in the list
+ found := false
+ appIdBytes := []byte(appId)
+ for _, app := range strings.Split(apps, ",") {
+ match := []byte(strings.TrimSpace(app))
+ // Protect against a timing attack with the app_id comparison
+ if subtle.ConstantTimeCompare(match, appIdBytes) == 1 {
+ found = true
+ }
+ }
+ if !found {
+ return "", logical.ErrorResponse("invalid user ID or app ID"), nil
+ }
+
+ // Get the raw data associated with the app
+ appRaw, err := b.MapAppId.Get(req.Storage, appId)
+ if err != nil {
+ return "", nil, err
+ }
+ if appRaw == nil {
+ return "", logical.ErrorResponse("invalid user ID or app ID"), nil
+ }
+ var displayName string
+ if raw, ok := appRaw["display_name"]; ok {
+ displayName = raw.(string)
+ }
+
+ return displayName, nil, nil
+}
+
+const pathLoginSyn = `
+Log in with an App ID and User ID.
+`
+
+const pathLoginDesc = `
+This endpoint authenticates using an application ID, a user ID, and potentially the IP address of the connecting client.
+`
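
Why verifyCredentials uses subtle.ConstantTimeCompare instead of ==: a naive comparison returns as soon as the first byte differs, which can leak how much of a guessed app ID was correct. A minimal illustration with placeholder values:

```go
package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	guess := []byte("app-id-guess")
	actual := []byte("app-id-real!")

	// ConstantTimeCompare returns 1 only on full equality and, for
	// equal-length inputs, takes the same time regardless of where the
	// first mismatch occurs.
	if subtle.ConstantTimeCompare(guess, actual) == 1 {
		fmt.Println("match")
	} else {
		fmt.Println("no match")
	}
}
```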
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go
new file mode 100644
index 0000000..cd5d97b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go
@@ -0,0 +1,130 @@
+package approle
+
+import (
+ "sync"
+
+ "github.com/hashicorp/vault/helper/locksutil"
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+type backend struct {
+ *framework.Backend
+
+	// The salt value to be used for hashing the information that is
+	// accessed only by this backend.
+ salt *salt.Salt
+
+ // The view to use when creating the salt
+ view logical.Storage
+
+ // Guard to clean-up the expired SecretID entries
+ tidySecretIDCASGuard uint32
+
+ // Locks to make changes to role entries. These will be initialized to a
+ // predefined number of locks when the backend is created, and will be
+ // indexed based on salted role names.
+ roleLocks []*locksutil.LockEntry
+
+ // Locks to make changes to the storage entries of RoleIDs generated. These
+ // will be initialized to a predefined number of locks when the backend is
+ // created, and will be indexed based on the salted RoleIDs.
+ roleIDLocks []*locksutil.LockEntry
+
+ // Locks to make changes to the storage entries of SecretIDs generated.
+ // These will be initialized to a predefined number of locks when the
+ // backend is created, and will be indexed based on the HMAC-ed SecretIDs.
+ secretIDLocks []*locksutil.LockEntry
+
+ // Locks to make changes to the storage entries of SecretIDAccessors
+ // generated. These will be initialized to a predefined number of locks
+ // when the backend is created, and will be indexed based on the
+ // SecretIDAccessors itself.
+	// SecretIDAccessors themselves.
+
+ // secretIDListingLock is a dedicated lock for listing SecretIDAccessors
+ // for all the SecretIDs issued against an approle
+ secretIDListingLock sync.RWMutex
+}
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ b, err := Backend(conf)
+ if err != nil {
+ return nil, err
+ }
+ return b.Setup(conf)
+}
+
+func Backend(conf *logical.BackendConfig) (*backend, error) {
+ // Create a backend object
+ b := &backend{
+ view: conf.StorageView,
+
+ // Create locks to modify the registered roles
+ roleLocks: locksutil.CreateLocks(),
+
+ // Create locks to modify the generated RoleIDs
+ roleIDLocks: locksutil.CreateLocks(),
+
+ // Create locks to modify the generated SecretIDs
+ secretIDLocks: locksutil.CreateLocks(),
+
+ // Create locks to modify the generated SecretIDAccessors
+ secretIDAccessorLocks: locksutil.CreateLocks(),
+ }
+
+ // Attach the paths and secrets that are to be handled by the backend
+ b.Backend = &framework.Backend{
+ // Register a periodic function that deletes the expired SecretID entries
+ PeriodicFunc: b.periodicFunc,
+ Help: backendHelp,
+ AuthRenew: b.pathLoginRenew,
+ PathsSpecial: &logical.Paths{
+ Unauthenticated: []string{
+ "login",
+ },
+ },
+ Paths: framework.PathAppend(
+ rolePaths(b),
+ []*framework.Path{
+ pathLogin(b),
+ pathTidySecretID(b),
+ },
+ ),
+ Init: b.initialize,
+ }
+ return b, nil
+}
+
+func (b *backend) initialize() error {
+ salt, err := salt.NewSalt(b.view, &salt.Config{
+ HashFunc: salt.SHA256Hash,
+ })
+ if err != nil {
+ return err
+ }
+ b.salt = salt
+ return nil
+}
+
+// periodicFunc of the backend will be invoked once a minute by the RollbackManager.
+// The AppRole backend utilizes this function to delete expired SecretID entries.
+// This could mean that a SecretID may live in the backend for up to one minute
+// after its expiration. The deletion of SecretIDs is not security-sensitive and
+// it is okay to delay their removal by a minute.
+func (b *backend) periodicFunc(req *logical.Request) error {
+ // Initiate clean-up of expired SecretID entries
+ b.tidySecretID(req.Storage)
+ return nil
+}
+
+const backendHelp = `
+Any registered Role can authenticate itself with Vault. The credentials
+required depend on the constraints that are set on the Role. One common
+required credential is the 'role_id', which is a unique identifier of the
+Role. It can be retrieved from the 'role/<role_name>/role-id' endpoint.
+
+The default constraint configuration is 'bind_secret_id', which requires
+the credential 'secret_id' to be presented during login. Refer to the
+documentation for other types of constraints.`
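
The role_id/secret_id flow from the help text, sketched with the official API client; the role name and mount point are hypothetical.

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the RoleID (a read) and mint a fresh SecretID (a write).
	roleID, err := client.Logical().Read("auth/approle/role/role1/role-id")
	if err != nil {
		log.Fatal(err)
	}
	secretID, err := client.Logical().Write("auth/approle/role/role1/secret-id", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Log in with both credentials.
	secret, err := client.Logical().Write("auth/approle/login",
		map[string]interface{}{
			"role_id":   roleID.Data["role_id"],
			"secret_id": secretID.Data["secret_id"],
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("client token:", secret.Auth.ClientToken)
}
```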
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go
new file mode 100644
index 0000000..e49cf48
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go
@@ -0,0 +1,29 @@
+package approle
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if b == nil {
+ t.Fatalf("failed to create backend")
+ }
+ _, err = b.Backend.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = b.Initialize()
+ if err != nil {
+ t.Fatal(err)
+ }
+ return b, config.StorageView
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go
new file mode 100644
index 0000000..d40530e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go
@@ -0,0 +1,106 @@
+package approle
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathLogin(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "login$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Unique identifier of the Role. Required to be supplied when the 'bind_secret_id' constraint is set.",
+ },
+ "secret_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+				Description: "SecretID belonging to the AppRole",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathLoginUpdate,
+ },
+ HelpSynopsis: pathLoginHelpSys,
+ HelpDescription: pathLoginHelpDesc,
+ }
+}
+
+// Returns the Auth object indicating the authentication and authorization information
+// if the credentials provided are validated by the backend.
+func (b *backend) pathLoginUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	role, roleName, metadata, err := b.validateCredentials(req, data)
+	if err != nil {
+		return logical.ErrorResponse(fmt.Sprintf("failed to validate SecretID: %s", err)), nil
+	}
+	if role == nil {
+		return logical.ErrorResponse("failed to validate SecretID"), nil
+	}
+
+ auth := &logical.Auth{
+ NumUses: role.TokenNumUses,
+ Period: role.Period,
+ InternalData: map[string]interface{}{
+ "role_name": roleName,
+ },
+ Metadata: metadata,
+ Policies: role.Policies,
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ },
+ }
+
+ // If 'Period' is set, use the value of 'Period' as the TTL.
+ // Otherwise, set the normal TokenTTL.
+ if role.Period > time.Duration(0) {
+ auth.TTL = role.Period
+ } else {
+ auth.TTL = role.TokenTTL
+ }
+
+ return &logical.Response{
+ Auth: auth,
+ }, nil
+}
+
+// Invoked when the token issued by this backend is attempting a renewal.
+func (b *backend) pathLoginRenew(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := req.Auth.InternalData["role_name"].(string)
+ if roleName == "" {
+ return nil, fmt.Errorf("failed to fetch role_name during renewal")
+ }
+
+ // Ensure that the Role still exists.
+ role, err := b.roleEntry(req.Storage, roleName)
+ if err != nil {
+		return nil, fmt.Errorf("failed to validate role %s during renewal: %s", roleName, err)
+ }
+ if role == nil {
+ return nil, fmt.Errorf("role %s does not exist during renewal", roleName)
+ }
+
+ // If 'Period' is set on the Role, the token should never expire.
+ // Replenish the TTL with 'Period's value.
+ if role.Period > time.Duration(0) {
+ // If 'Period' was updated after the token was issued,
+ // token will bear the updated 'Period' value as its TTL.
+ req.Auth.TTL = role.Period
+ return &logical.Response{Auth: req.Auth}, nil
+ } else {
+ return framework.LeaseExtend(role.TokenTTL, role.TokenMaxTTL, b.System())(req, data)
+ }
+}
+
+const pathLoginHelpSys = "Issue a token based on the credentials supplied"
+
+const pathLoginHelpDesc = `
+While the credential 'role_id' is required at all times, the other
+required credentials depend on the properties of the AppRole to which
+the 'role_id' belongs. The 'bind_secret_id' constraint (enabled by
+default) on the AppRole requires the 'secret_id' credential to be
+presented.
+
+'role_id' is fetched using the 'role/<role_name>/role-id' endpoint
+and 'secret_id' is fetched using the 'role/<role_name>/secret-id'
+endpoint.`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login_test.go
new file mode 100644
index 0000000..211b40f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login_test.go
@@ -0,0 +1,58 @@
+package approle
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestAppRole_RoleLogin(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ createRole(t, b, storage, "role1", "a,b,c")
+ roleRoleIDReq := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "role/role1/role-id",
+ Storage: storage,
+ }
+ resp, err = b.HandleRequest(roleRoleIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ roleID := resp.Data["role_id"]
+
+ roleSecretIDReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "role/role1/secret-id",
+ Storage: storage,
+ }
+ resp, err = b.HandleRequest(roleSecretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ secretID := resp.Data["secret_id"]
+
+ loginData := map[string]interface{}{
+ "role_id": roleID,
+ "secret_id": secretID,
+ }
+ loginReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Storage: storage,
+ Data: loginData,
+ Connection: &logical.Connection{
+ RemoteAddr: "127.0.0.1",
+ },
+ }
+ resp, err = b.HandleRequest(loginReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Auth == nil {
+ t.Fatalf("expected a non-nil auth object in the response")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go
new file mode 100644
index 0000000..2a1ff1a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go
@@ -0,0 +1,2122 @@
+package approle
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/cidrutil"
+ "github.com/hashicorp/vault/helper/locksutil"
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// roleStorageEntry stores all the options that are set on a role
+type roleStorageEntry struct {
+ // UUID that uniquely represents this role. This serves as a credential
+ // to perform login using this role.
+ RoleID string `json:"role_id" structs:"role_id" mapstructure:"role_id"`
+
+ // UUID that serves as the HMAC key for hashing the 'secret_id's
+ // of the role
+ HMACKey string `json:"hmac_key" structs:"hmac_key" mapstructure:"hmac_key"`
+
+ // Policies that are to be required by the token to access this role
+ Policies []string `json:"policies" structs:"policies" mapstructure:"policies"`
+
+ // Number of times the SecretID generated against this role can be
+ // used to perform login operation
+ SecretIDNumUses int `json:"secret_id_num_uses" structs:"secret_id_num_uses" mapstructure:"secret_id_num_uses"`
+
+ // Duration (less than the backend mount's max TTL) after which a
+ // SecretID generated against the role will expire
+ SecretIDTTL time.Duration `json:"secret_id_ttl" structs:"secret_id_ttl" mapstructure:"secret_id_ttl"`
+
+ // TokenNumUses defines the number of allowed uses of the token issued
+ TokenNumUses int `json:"token_num_uses" mapstructure:"token_num_uses" structs:"token_num_uses"`
+
+ // Duration before which an issued token must be renewed
+ TokenTTL time.Duration `json:"token_ttl" structs:"token_ttl" mapstructure:"token_ttl"`
+
+ // Duration after which an issued token should not be allowed to be renewed
+ TokenMaxTTL time.Duration `json:"token_max_ttl" structs:"token_max_ttl" mapstructure:"token_max_ttl"`
+
+ // A constraint, if set, requires the 'secret_id' credential to be presented during login
+ BindSecretID bool `json:"bind_secret_id" structs:"bind_secret_id" mapstructure:"bind_secret_id"`
+
+ // A constraint, if set, specifies the CIDR blocks from which logins should be allowed
+ BoundCIDRList string `json:"bound_cidr_list" structs:"bound_cidr_list" mapstructure:"bound_cidr_list"`
+
+ // Period, if set, indicates that the token generated using this role
+ // should never expire. The token should be renewed within the duration
+ // specified by this value. The renewal duration will be fixed if the
+ // value is not modified on the role. If the `Period` in the role is modified,
+ // a token will pick up the new value during its next renewal.
+ Period time.Duration `json:"period" mapstructure:"period" structs:"period"`
+}
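+
+// An illustrative sketch (not part of the upstream source) of a minimal
+// role entry that satisfies the constraint check in validateRoleConstraints,
+// with placeholder UUID strings:
+//
+//     role := &roleStorageEntry{
+//         RoleID:       "roleid-uuid", // placeholder; normally uuid.GenerateUUID()
+//         HMACKey:      "hmac-uuid",   // placeholder; per-role HMAC key
+//         Policies:     []string{"default", "dev"},
+//         BindSecretID: true, // the default constraint
+//         TokenTTL:     20 * time.Minute,
+//         TokenMaxTTL:  30 * time.Minute,
+//     }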
+
+// roleIDStorageEntry represents the reverse mapping from RoleID to Role
+type roleIDStorageEntry struct {
+ Name string `json:"name" structs:"name" mapstructure:"name"`
+}
+
+// rolePaths creates all the paths that are used to register and manage a role.
+//
+// Paths returned:
+// role/ - For listing all the registered roles
+// role/<role_name> - For registering a role
+// role/<role_name>/policies - For updating the param
+// role/<role_name>/secret-id-num-uses - For updating the param
+// role/<role_name>/secret-id-ttl - For updating the param
+// role/<role_name>/token-ttl - For updating the param
+// role/<role_name>/token-max-ttl - For updating the param
+// role/<role_name>/token-num-uses - For updating the param
+// role/<role_name>/bind-secret-id - For updating the param
+// role/<role_name>/bound-cidr-list - For updating the param
+// role/<role_name>/period - For updating the param
+// role/<role_name>/role-id - For fetching the role_id of a role
+// role/<role_name>/secret-id - For issuing a secret_id against a role, also to list the secret_id_accessors
+// role/<role_name>/custom-secret-id - For assigning a custom SecretID against a role
+// role/<role_name>/secret-id/lookup - For reading the properties of a secret_id
+// role/<role_name>/secret-id/destroy - For deleting a secret_id
+// role/<role_name>/secret-id-accessor/lookup - For reading the properties of a secret_id using its accessor
+// role/<role_name>/secret-id-accessor/destroy - For deleting a secret_id using its accessor
+func rolePaths(b *backend) []*framework.Path {
+ return []*framework.Path{
+ &framework.Path{
+ Pattern: "role/?",
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-list"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-list"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name"),
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "bind_secret_id": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: "Impose secret_id to be presented when logging in using this role. Defaults to 'true'.",
+ },
+ "bound_cidr_list": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Comma separated list of CIDR blocks, if set, specifies blocks of IP
+addresses which can perform the login operation`,
+ },
+ "policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "default",
+ Description: "Comma separated list of policies on the role.",
+ },
+ "secret_id_num_uses": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `Number of times a SecretID can access the role, after which the SecretID
+will expire. Defaults to 0, meaning that the secret_id is of unlimited use.`,
+ },
+ "secret_id_ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: `Duration in seconds after which the issued SecretID should expire. Defaults
+to 0, meaning no expiration.`,
+ },
+ "token_num_uses": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `Number of times issued tokens can be used`,
+ },
+ "token_ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: `Duration in seconds after which the issued token should expire. Defaults
+to 0, in which case the value will fall back to the system/mount defaults.`,
+ },
+ "token_max_ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: `Duration in seconds after which the issued token should not be allowed to
+be renewed. Defaults to 0, in which case the value will fall back to the system/mount defaults.`,
+ },
+ "period": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 0,
+ Description: `If set, indicates that the token generated using this role
+should never expire. The token should be renewed within the
+duration specified by this value. At each renewal, the token's
+TTL will be set to the value of this parameter.`,
+ },
+ "role_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Identifier of the role. Defaults to a UUID.",
+ },
+ },
+ ExistenceCheck: b.pathRoleExistenceCheck,
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.CreateOperation: b.pathRoleCreateUpdate,
+ logical.UpdateOperation: b.pathRoleCreateUpdate,
+ logical.ReadOperation: b.pathRoleRead,
+ logical.DeleteOperation: b.pathRoleDelete,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/policies$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "default",
+ Description: "Comma separated list of policies on the role.",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRolePoliciesUpdate,
+ logical.ReadOperation: b.pathRolePoliciesRead,
+ logical.DeleteOperation: b.pathRolePoliciesDelete,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-policies"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-policies"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bound-cidr-list$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "bound_cidr_list": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Comma separated list of CIDR blocks, if set, specifies blocks of IP
+addresses which can perform the login operation`,
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleBoundCIDRListUpdate,
+ logical.ReadOperation: b.pathRoleBoundCIDRListRead,
+ logical.DeleteOperation: b.pathRoleBoundCIDRListDelete,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-bound-cidr-list"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-bound-cidr-list"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bind-secret-id$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "bind_secret_id": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: "Impose secret_id to be presented when logging in using this role.",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleBindSecretIDUpdate,
+ logical.ReadOperation: b.pathRoleBindSecretIDRead,
+ logical.DeleteOperation: b.pathRoleBindSecretIDDelete,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-bind-secret-id"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-bind-secret-id"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-num-uses$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "secret_id_num_uses": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: "Number of times a SecretID can access the role, after which the SecretID will expire.",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleSecretIDNumUsesUpdate,
+ logical.ReadOperation: b.pathRoleSecretIDNumUsesRead,
+ logical.DeleteOperation: b.pathRoleSecretIDNumUsesDelete,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-num-uses"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-num-uses"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-ttl$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "secret_id_ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: `Duration in seconds after which the issued SecretID should expire. Defaults
+to 0, meaning no expiration.`,
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleSecretIDTTLUpdate,
+ logical.ReadOperation: b.pathRoleSecretIDTTLRead,
+ logical.DeleteOperation: b.pathRoleSecretIDTTLDelete,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-ttl"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-ttl"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/period$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "period": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 0,
+ Description: `If set, indicates that the token generated using this role
+should never expire. The token should be renewed within the
+duration specified by this value. At each renewal, the token's
+TTL will be set to the value of this parameter.`,
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRolePeriodUpdate,
+ logical.ReadOperation: b.pathRolePeriodRead,
+ logical.DeleteOperation: b.pathRolePeriodDelete,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-period"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-period"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-num-uses$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "token_num_uses": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `Number of times issued tokens can be used`,
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleTokenNumUsesUpdate,
+ logical.ReadOperation: b.pathRoleTokenNumUsesRead,
+ logical.DeleteOperation: b.pathRoleTokenNumUsesDelete,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-token-num-uses"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-token-num-uses"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-ttl$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "token_ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: `Duration in seconds after which the issued token should expire. Defaults
+to 0, in which case the value will fall back to the system/mount defaults.`,
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleTokenTTLUpdate,
+ logical.ReadOperation: b.pathRoleTokenTTLRead,
+ logical.DeleteOperation: b.pathRoleTokenTTLDelete,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-token-ttl"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-token-ttl"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-max-ttl$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "token_max_ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: `Duration in seconds after which the issued token should not be allowed to
+be renewed. Defaults to 0, in which case the value will fall back to the system/mount defaults.`,
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleTokenMaxTTLUpdate,
+ logical.ReadOperation: b.pathRoleTokenMaxTTLRead,
+ logical.DeleteOperation: b.pathRoleTokenMaxTTLDelete,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-token-max-ttl"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-token-max-ttl"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/role-id$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "role_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Identifier of the role. Defaults to a UUID.",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRoleRoleIDRead,
+ logical.UpdateOperation: b.pathRoleRoleIDUpdate,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-id"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-id"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/?$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "metadata": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Metadata to be tied to the SecretID. This should be a JSON
+formatted string containing the metadata in key value pairs.`,
+ },
+ "cidr_list": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Comma separated list of CIDR blocks enforcing secret IDs to be used from a
+specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the
+list of CIDR blocks listed here should be a subset of the CIDR blocks listed on
+the role.`,
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleSecretIDUpdate,
+ logical.ListOperation: b.pathRoleSecretIDList,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-secret-id"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/lookup/?$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "secret_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "SecretID attached to the role.",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleSecretIDLookupUpdate,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-lookup"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-lookup"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/destroy/?$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "secret_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "SecretID attached to the role.",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleSecretIDDestroyUpdateDelete,
+ logical.DeleteOperation: b.pathRoleSecretIDDestroyUpdateDelete,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-destroy"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-destroy"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/lookup/?$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "secret_id_accessor": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Accessor of the SecretID",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleSecretIDAccessorLookupUpdate,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-accessor"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-accessor"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/destroy/?$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "secret_id_accessor": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Accessor of the SecretID",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleSecretIDAccessorDestroyUpdateDelete,
+ logical.DeleteOperation: b.pathRoleSecretIDAccessorDestroyUpdateDelete,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-accessor"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-accessor"][1]),
+ },
+ &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/custom-secret-id$",
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "secret_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "SecretID to be attached to the role.",
+ },
+ "metadata": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Metadata to be tied to the SecretID. This should be a JSON
+formatted string containing metadata in key value pairs.`,
+ },
+ "cidr_list": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Comma separated list of CIDR blocks enforcing secret IDs to be used from a
+specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the
+list of CIDR blocks listed here should be a subset of the CIDR blocks listed on
+the role.`,
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleCustomSecretIDUpdate,
+ },
+ HelpSynopsis: strings.TrimSpace(roleHelp["role-custom-secret-id"][0]),
+ HelpDescription: strings.TrimSpace(roleHelp["role-custom-secret-id"][1]),
+ },
+ }
+}
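+
+// An illustrative sketch (not part of the upstream source): once these
+// paths are registered, any of the per-field endpoints is exercised through
+// the framework, e.g. updating the policies of a role named 'role1':
+//
+//     req := &logical.Request{
+//         Operation: logical.UpdateOperation,
+//         Path:      "role/role1/policies",
+//         Storage:   storage, // any logical.Storage
+//         Data:      map[string]interface{}{"policies": "p,q,r"},
+//     }
+//     resp, err := b.HandleRequest(req)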
+
+// pathRoleExistenceCheck returns whether the role with the given name exists or not.
+func (b *backend) pathRoleExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
+ role, err := b.roleEntry(req.Storage, data.Get("role_name").(string))
+ if err != nil {
+ return false, err
+ }
+ return role != nil, nil
+}
+
+// pathRoleList is used to list all the Roles registered with the backend.
+func (b *backend) pathRoleList(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ lock := b.roleLock("")
+
+ lock.RLock()
+ defer lock.RUnlock()
+
+ roles, err := req.Storage.List("role/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(roles), nil
+}
+
+// pathRoleSecretIDList is used to list all the 'secret_id_accessor's issued against the role.
+func (b *backend) pathRoleSecretIDList(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ // Get the role entry
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("role %s does not exist", roleName)), nil
+ }
+
+ // Guard the list operation with an outer lock
+ b.secretIDListingLock.RLock()
+ defer b.secretIDListingLock.RUnlock()
+
+ roleNameHMAC, err := createHMAC(role.HMACKey, roleName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create HMAC of role_name: %s", err)
+ }
+
+ // Listing works one level at a time. Get the first level of data
+ // which could then be used to get the actual SecretID storage entries.
+ secretIDHMACs, err := req.Storage.List(fmt.Sprintf("secret_id/%s/", roleNameHMAC))
+ if err != nil {
+ return nil, err
+ }
+
+ var listItems []string
+ for _, secretIDHMAC := range secretIDHMACs {
+ // For sanity
+ if secretIDHMAC == "" {
+ continue
+ }
+
+ // Prepare the full index of the SecretIDs.
+ entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
+
+ // SecretID locks are not indexed by the SecretIDs themselves.
+ // This is because SecretIDs are not stored in plaintext
+ // form anywhere in the backend, so a lock cannot be looked
+ // up repeatedly using the SecretID. Indexing the locks by
+ // secretIDHMACs everywhere also makes the listing operation
+ // easier.
+ secretIDLock := b.secretIDLock(secretIDHMAC)
+
+ secretIDLock.RLock()
+
+ result := secretIDStorageEntry{}
+ if entry, err := req.Storage.Get(entryIndex); err != nil {
+ secretIDLock.RUnlock()
+ return nil, err
+ } else if entry == nil {
+ secretIDLock.RUnlock()
+ return nil, fmt.Errorf("storage entry for SecretID is present but no content found at the index")
+ } else if err := entry.DecodeJSON(&result); err != nil {
+ secretIDLock.RUnlock()
+ return nil, err
+ }
+ listItems = append(listItems, result.SecretIDAccessor)
+ secretIDLock.RUnlock()
+ }
+
+ return logical.ListResponse(listItems), nil
+}
+
+// validateRoleConstraints checks if the role has at least one constraint
+// enabled.
+func validateRoleConstraints(role *roleStorageEntry) error {
+ if role == nil {
+ return fmt.Errorf("nil role")
+ }
+
+ // At least one constraint should be enabled on the role
+ switch {
+ case role.BindSecretID:
+ case role.BoundCIDRList != "":
+ default:
+ return fmt.Errorf("at least one constraint should be enabled on the role")
+ }
+
+ return nil
+}
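+
+// An illustrative sketch (not part of the upstream source): a role with
+// bind_secret_id disabled and no bound_cidr_list fails the check above:
+//
+//     err := validateRoleConstraints(&roleStorageEntry{BindSecretID: false})
+//     // err != nil: "at least one constraint should be enabled on the role"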
+
+// setRoleEntry grabs a write lock and stores the options on a role in the
+// storage. It also creates a reverse index from the role's RoleID to the role
+// itself.
+func (b *backend) setRoleEntry(s logical.Storage, roleName string, role *roleStorageEntry, previousRoleID string) error {
+ if roleName == "" {
+ return fmt.Errorf("missing role name")
+ }
+
+ if role == nil {
+ return fmt.Errorf("nil role")
+ }
+
+ // Check if role constraints are properly set
+ if err := validateRoleConstraints(role); err != nil {
+ return err
+ }
+
+ // Create a storage entry for the role
+ entry, err := logical.StorageEntryJSON("role/"+strings.ToLower(roleName), role)
+ if err != nil {
+ return err
+ }
+ if entry == nil {
+ return fmt.Errorf("failed to create storage entry for role %s", roleName)
+ }
+
+ // Check if the index from the role_id to role already exists
+ roleIDIndex, err := b.roleIDEntry(s, role.RoleID)
+ if err != nil {
+ return fmt.Errorf("failed to read role_id index: %v", err)
+ }
+
+ // If the entry exists, make sure that it belongs to the current role
+ if roleIDIndex != nil && roleIDIndex.Name != roleName {
+ return fmt.Errorf("role_id already in use")
+ }
+
+ // When role_id is getting updated, delete the old index before
+ // a new one is created
+ if previousRoleID != "" && previousRoleID != role.RoleID {
+ if err = b.roleIDEntryDelete(s, previousRoleID); err != nil {
+ return fmt.Errorf("failed to delete previous role ID index")
+ }
+ }
+
+ // Save the role entry only after all the validations
+ if err = s.Put(entry); err != nil {
+ return err
+ }
+
+ // If previousRoleID is still intact, don't create another one
+ if previousRoleID != "" && previousRoleID == role.RoleID {
+ return nil
+ }
+
+ // Create a storage entry for reverse mapping of RoleID to role.
+ // Note that secondary index is created when the roleLock is held.
+ return b.setRoleIDEntry(s, role.RoleID, &roleIDStorageEntry{
+ Name: roleName,
+ })
+}
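+
+// An illustrative sketch (not part of the upstream source) of the two
+// entries maintained by setRoleEntry for a role named 'role1' (leaving
+// aside how the role_id index key is derived from role.RoleID):
+//
+//     role/role1 -> roleStorageEntry (JSON)
+//     role_id index for role.RoleID -> roleIDStorageEntry{Name: "role1"}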
+
+// roleEntry grabs the read lock and fetches the options of a role from the storage
+func (b *backend) roleEntry(s logical.Storage, roleName string) (*roleStorageEntry, error) {
+ if roleName == "" {
+ return nil, fmt.Errorf("missing role_name")
+ }
+
+ var role roleStorageEntry
+
+ lock := b.roleLock(roleName)
+
+ lock.RLock()
+ defer lock.RUnlock()
+
+ if entry, err := s.Get("role/" + strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if entry == nil {
+ return nil, nil
+ } else if err := entry.DecodeJSON(&role); err != nil {
+ return nil, err
+ }
+
+ return &role, nil
+}
+
+// pathRoleCreateUpdate registers a new role with the backend or updates the options
+// of an existing role
+func (b *backend) pathRoleCreateUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ // Check if the role already exists
+ role, err := b.roleEntry(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create a new entry object if this is a CreateOperation
+ if role == nil && req.Operation == logical.CreateOperation {
+ hmacKey, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create role_id: %s\n", err)
+ }
+ role = &roleStorageEntry{
+ HMACKey: hmacKey,
+ }
+ } else if role == nil {
+ return nil, fmt.Errorf("role entry not found during update operation")
+ }
+
+ previousRoleID := role.RoleID
+ if roleIDRaw, ok := data.GetOk("role_id"); ok {
+ role.RoleID = roleIDRaw.(string)
+ } else if req.Operation == logical.CreateOperation {
+ roleID, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate role_id: %s\n", err)
+ }
+ role.RoleID = roleID
+ }
+ if role.RoleID == "" {
+ return logical.ErrorResponse("invalid role_id"), nil
+ }
+
+ if bindSecretIDRaw, ok := data.GetOk("bind_secret_id"); ok {
+ role.BindSecretID = bindSecretIDRaw.(bool)
+ } else if req.Operation == logical.CreateOperation {
+ role.BindSecretID = data.Get("bind_secret_id").(bool)
+ }
+
+ if boundCIDRListRaw, ok := data.GetOk("bound_cidr_list"); ok {
+ role.BoundCIDRList = strings.TrimSpace(boundCIDRListRaw.(string))
+ } else if req.Operation == logical.CreateOperation {
+ role.BoundCIDRList = data.Get("bound_cidr_list").(string)
+ }
+
+ if role.BoundCIDRList != "" {
+ valid, err := cidrutil.ValidateCIDRListString(role.BoundCIDRList, ",")
+ if err != nil {
+ return nil, fmt.Errorf("failed to validate CIDR blocks: %v", err)
+ }
+ if !valid {
+ return logical.ErrorResponse("invalid CIDR blocks"), nil
+ }
+ }
+
+ if policiesRaw, ok := data.GetOk("policies"); ok {
+ role.Policies = policyutil.ParsePolicies(policiesRaw.(string))
+ } else if req.Operation == logical.CreateOperation {
+ role.Policies = policyutil.ParsePolicies(data.Get("policies").(string))
+ }
+
+ periodRaw, ok := data.GetOk("period")
+ if ok {
+ role.Period = time.Second * time.Duration(periodRaw.(int))
+ } else if req.Operation == logical.CreateOperation {
+ role.Period = time.Second * time.Duration(data.Get("period").(int))
+ }
+ if role.Period > b.System().MaxLeaseTTL() {
+ return logical.ErrorResponse(fmt.Sprintf("'period' of '%s' is greater than the backend's maximum lease TTL of '%s'", role.Period.String(), b.System().MaxLeaseTTL().String())), nil
+ }
+
+ if secretIDNumUsesRaw, ok := data.GetOk("secret_id_num_uses"); ok {
+ role.SecretIDNumUses = secretIDNumUsesRaw.(int)
+ } else if req.Operation == logical.CreateOperation {
+ role.SecretIDNumUses = data.Get("secret_id_num_uses").(int)
+ }
+ if role.SecretIDNumUses < 0 {
+ return logical.ErrorResponse("secret_id_num_uses cannot be negative"), nil
+ }
+
+ if secretIDTTLRaw, ok := data.GetOk("secret_id_ttl"); ok {
+ role.SecretIDTTL = time.Second * time.Duration(secretIDTTLRaw.(int))
+ } else if req.Operation == logical.CreateOperation {
+ role.SecretIDTTL = time.Second * time.Duration(data.Get("secret_id_ttl").(int))
+ }
+
+ if tokenNumUsesRaw, ok := data.GetOk("token_num_uses"); ok {
+ role.TokenNumUses = tokenNumUsesRaw.(int)
+ } else if req.Operation == logical.CreateOperation {
+ role.TokenNumUses = data.Get("token_num_uses").(int)
+ }
+ if role.TokenNumUses < 0 {
+ return logical.ErrorResponse("token_num_uses cannot be negative"), nil
+ }
+
+ if tokenTTLRaw, ok := data.GetOk("token_ttl"); ok {
+ role.TokenTTL = time.Second * time.Duration(tokenTTLRaw.(int))
+ } else if req.Operation == logical.CreateOperation {
+ role.TokenTTL = time.Second * time.Duration(data.Get("token_ttl").(int))
+ }
+
+ if tokenMaxTTLRaw, ok := data.GetOk("token_max_ttl"); ok {
+ role.TokenMaxTTL = time.Second * time.Duration(tokenMaxTTLRaw.(int))
+ } else if req.Operation == logical.CreateOperation {
+ role.TokenMaxTTL = time.Second * time.Duration(data.Get("token_max_ttl").(int))
+ }
+
+ // Check that the TokenTTL value provided is less than the TokenMaxTTL.
+ // Sanitizing the TTL and MaxTTL is not required now and can be performed
+ // at credential issue time.
+ if role.TokenMaxTTL > time.Duration(0) && role.TokenTTL > role.TokenMaxTTL {
+ return logical.ErrorResponse("token_ttl should not be greater than token_max_ttl"), nil
+ }
+
+ var resp *logical.Response
+ if role.TokenMaxTTL > b.System().MaxLeaseTTL() {
+ resp = &logical.Response{}
+ resp.AddWarning("token_max_ttl is greater than the backend mount's maximum TTL value; issued tokens' max TTL value will be truncated")
+ }
+
+ // Store the entry.
+ return resp, b.setRoleEntry(req.Storage, roleName, role, previousRoleID)
+}
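+
+// An illustrative sketch (not part of the upstream source) of the
+// create-vs-update pattern used for every field above: on a
+// CreateOperation an absent field falls back to its schema default,
+// while on an UpdateOperation an absent field keeps its stored value.
+//
+//     // create without 'bind_secret_id' -> role.BindSecretID == true (default)
+//     // update without 'bind_secret_id' -> role.BindSecretID unchanged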
+
+// pathRoleRead grabs a read lock and reads the options set on the role from the storage
+func (b *backend) pathRoleRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if role == nil {
+ return nil, nil
+ } else {
+ // Convert the 'time.Duration' values to seconds.
+ role.SecretIDTTL /= time.Second
+ role.TokenTTL /= time.Second
+ role.TokenMaxTTL /= time.Second
+ role.Period /= time.Second
+
+ // Create a map of data to be returned and remove sensitive information from it
+ data := structs.New(role).Map()
+ delete(data, "role_id")
+ delete(data, "hmac_key")
+
+ resp := &logical.Response{
+ Data: data,
+ }
+
+ if err := validateRoleConstraints(role); err != nil {
+ resp.AddWarning("Role does not have any constraints set on it. Updates to this role will require a constraint to be set")
+ }
+
+ return resp, nil
+ }
+}
+
+// pathRoleDelete removes the role from the storage
+func (b *backend) pathRoleDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ // Acquire the lock before deleting the secrets.
+ lock := b.roleLock(roleName)
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Just before the role is deleted, remove all the SecretIDs issued as part of the role.
+ if err = b.flushRoleSecrets(req.Storage, roleName, role.HMACKey); err != nil {
+ return nil, fmt.Errorf("failed to invalidate the secrets belonging to role '%s': %s", roleName, err)
+ }
+
+ // Delete the reverse mapping from RoleID to the role
+ if err = b.roleIDEntryDelete(req.Storage, role.RoleID); err != nil {
+ return nil, fmt.Errorf("failed to delete the mapping from RoleID to role '%s': %s", roleName, err)
+ }
+
+ // After deleting the SecretIDs and the RoleID, delete the role itself
+ if err = req.Storage.Delete("role/" + strings.ToLower(roleName)); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// Returns the properties of the SecretID
+func (b *backend) pathRoleSecretIDLookupUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ secretID := data.Get("secret_id").(string)
+ if secretID == "" {
+ return logical.ErrorResponse("missing secret_id"), nil
+ }
+
+ // Fetch the role
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, fmt.Errorf("role %s does not exist", roleName)
+ }
+
+ // Create the HMAC of the secret ID using the per-role HMAC key
+ secretIDHMAC, err := createHMAC(role.HMACKey, secretID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create HMAC of secret_id: %s", err)
+ }
+
+ // Create the HMAC of the roleName using the per-role HMAC key
+ roleNameHMAC, err := createHMAC(role.HMACKey, roleName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create HMAC of role_name: %s", err)
+ }
+
+ // Create the index at which the secret_id would've been stored
+ entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
+
+ return b.secretIDCommon(req.Storage, entryIndex, secretIDHMAC)
+}
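+
+// An illustrative sketch (not part of the upstream source): both components
+// of the index are HMACs under the per-role key, so the plaintext secret_id
+// never appears in storage:
+//
+//     entryIndex == "secret_id/" + HMAC(role.HMACKey, roleName) + "/" + HMAC(role.HMACKey, secretID)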
+
+func (b *backend) secretIDCommon(s logical.Storage, entryIndex, secretIDHMAC string) (*logical.Response, error) {
+ lock := b.secretIDLock(secretIDHMAC)
+ lock.RLock()
+ defer lock.RUnlock()
+
+ result := secretIDStorageEntry{}
+ if entry, err := s.Get(entryIndex); err != nil {
+ return nil, err
+ } else if entry == nil {
+ return nil, nil
+ } else if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ result.SecretIDTTL /= time.Second
+ d := structs.New(result).Map()
+
+ // Converting the time values to RFC3339Nano format.
+ //
+ // Map() from 'structs' package formats time in RFC3339Nano.
+ // In order to not break the API due to a modification in the
+ // third party package, converting the time values again.
+ d["creation_time"] = result.CreationTime.Format(time.RFC3339Nano)
+ d["expiration_time"] = result.ExpirationTime.Format(time.RFC3339Nano)
+ d["last_updated_time"] = result.LastUpdatedTime.Format(time.RFC3339Nano)
+
+ resp := &logical.Response{
+ Data: d,
+ }
+
+ if _, ok := d["SecretIDNumUses"]; ok {
+ resp.AddWarning("The field SecretIDNumUses is deprecated and will be removed in a future release; refer to secret_id_num_uses instead")
+ }
+
+ return resp, nil
+}
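+
+// An illustrative sketch (not part of the upstream source) of the pinned
+// time format used above:
+//
+//     t := time.Date(2017, 5, 1, 12, 0, 0, 123456789, time.UTC)
+//     t.Format(time.RFC3339Nano) // "2017-05-01T12:00:00.123456789Z"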
+
+func (b *backend) pathRoleSecretIDDestroyUpdateDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ secretID := data.Get("secret_id").(string)
+ if secretID == "" {
+ return logical.ErrorResponse("missing secret_id"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, fmt.Errorf("role %s does not exist", roleName)
+ }
+
+ secretIDHMAC, err := createHMAC(role.HMACKey, secretID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create HMAC of secret_id: %s", err)
+ }
+
+ roleNameHMAC, err := createHMAC(role.HMACKey, roleName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create HMAC of role_name: %s", err)
+ }
+
+ entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
+
+ lock := b.secretIDLock(secretIDHMAC)
+ lock.Lock()
+ defer lock.Unlock()
+
+ result := secretIDStorageEntry{}
+ if entry, err := req.Storage.Get(entryIndex); err != nil {
+ return nil, err
+ } else if entry == nil {
+ return nil, nil
+ } else if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ // Delete the accessor of the SecretID first
+ if err := b.deleteSecretIDAccessorEntry(req.Storage, result.SecretIDAccessor); err != nil {
+ return nil, err
+ }
+
+ // Delete the storage entry that corresponds to the SecretID
+ if err := req.Storage.Delete(entryIndex); err != nil {
+ return nil, fmt.Errorf("failed to delete SecretID: %s", err)
+ }
+
+ return nil, nil
+}
+
+// pathRoleSecretIDAccessorLookupUpdate returns the properties of the SecretID
+// given its accessor
+func (b *backend) pathRoleSecretIDAccessorLookupUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ secretIDAccessor := data.Get("secret_id_accessor").(string)
+ if secretIDAccessor == "" {
+ return logical.ErrorResponse("missing secret_id_accessor"), nil
+ }
+
+ // SecretID is indexed based on HMACed roleName and HMACed SecretID.
+ // Get the role details to fetch the per-role HMAC key, and the
+ // accessor entry to get the HMACed SecretID.
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, fmt.Errorf("role %s does not exist", roleName)
+ }
+
+ accessorEntry, err := b.secretIDAccessorEntry(req.Storage, secretIDAccessor)
+ if err != nil {
+ return nil, err
+ }
+ if accessorEntry == nil {
+ return nil, fmt.Errorf("failed to find accessor entry for secret_id_accessor:%s\n", secretIDAccessor)
+ }
+
+ roleNameHMAC, err := createHMAC(role.HMACKey, roleName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create HMAC of role_name: %s", err)
+ }
+
+ entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, accessorEntry.SecretIDHMAC)
+
+ return b.secretIDCommon(req.Storage, entryIndex, accessorEntry.SecretIDHMAC)
+}
+
+func (b *backend) pathRoleSecretIDAccessorDestroyUpdateDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ secretIDAccessor := data.Get("secret_id_accessor").(string)
+ if secretIDAccessor == "" {
+ return logical.ErrorResponse("missing secret_id_accessor"), nil
+ }
+
+ // SecretID is indexed based on HMACed roleName and HMACed SecretID.
+ // Get the role details to fetch the per-role HMAC key, and the
+ // accessor entry to get the HMACed SecretID.
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, fmt.Errorf("role %s does not exist", roleName)
+ }
+
+ accessorEntry, err := b.secretIDAccessorEntry(req.Storage, secretIDAccessor)
+ if err != nil {
+ return nil, err
+ }
+ if accessorEntry == nil {
+ return nil, fmt.Errorf("failed to find accessor entry for secret_id_accessor:%s\n", secretIDAccessor)
+ }
+
+ roleNameHMAC, err := createHMAC(role.HMACKey, roleName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create HMAC of role_name: %s", err)
+ }
+
+ entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, accessorEntry.SecretIDHMAC)
+
+ lock := b.secretIDLock(accessorEntry.SecretIDHMAC)
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Delete the accessor of the SecretID first
+ if err := b.deleteSecretIDAccessorEntry(req.Storage, secretIDAccessor); err != nil {
+ return nil, err
+ }
+
+ // Delete the storage entry that corresponds to the SecretID
+ if err := req.Storage.Delete(entryIndex); err != nil {
+ return nil, fmt.Errorf("failed to delete SecretID: %s", err)
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathRoleBoundCIDRListUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ role.BoundCIDRList = strings.TrimSpace(data.Get("bound_cidr_list").(string))
+ if role.BoundCIDRList == "" {
+ return logical.ErrorResponse("missing bound_cidr_list"), nil
+ }
+
+ if role.BoundCIDRList != "" {
+ valid, err := cidrutil.ValidateCIDRListString(role.BoundCIDRList, ",")
+ if err != nil {
+ return nil, fmt.Errorf("failed to validate CIDR blocks: %q", err)
+ }
+ if !valid {
+ return logical.ErrorResponse("failed to validate CIDR blocks"), nil
+ }
+ }
+
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+}
+
+func (b *backend) pathRoleBoundCIDRListRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if role == nil {
+ return nil, nil
+ } else {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "bound_cidr_list": role.BoundCIDRList,
+ },
+ }, nil
+ }
+}
+
+func (b *backend) pathRoleBoundCIDRListDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Deleting a field implies setting the value to its default value.
+ role.BoundCIDRList = data.GetDefaultOrZero("bound_cidr_list").(string)
+
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+}
+
+func (b *backend) pathRoleBindSecretIDUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ if bindSecretIDRaw, ok := data.GetOk("bind_secret_id"); ok {
+ role.BindSecretID = bindSecretIDRaw.(bool)
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+ } else {
+ return logical.ErrorResponse("missing bind_secret_id"), nil
+ }
+}
+
+func (b *backend) pathRoleBindSecretIDRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if role == nil {
+ return nil, nil
+ } else {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "bind_secret_id": role.BindSecretID,
+ },
+ }, nil
+ }
+}
+
+func (b *backend) pathRoleBindSecretIDDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Deleting a field implies setting the value to its default value.
+ role.BindSecretID = data.GetDefaultOrZero("bind_secret_id").(bool)
+
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+}
+
+func (b *backend) pathRolePoliciesUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ policies := strings.TrimSpace(data.Get("policies").(string))
+ if policies == "" {
+ return logical.ErrorResponse("missing policies"), nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ role.Policies = policyutil.ParsePolicies(policies)
+
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+}
+
+func (b *backend) pathRolePoliciesRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if role == nil {
+ return nil, nil
+ } else {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "policies": role.Policies,
+ },
+ }, nil
+ }
+}
+
+func (b *backend) pathRolePoliciesDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ role.Policies = policyutil.ParsePolicies(data.GetDefaultOrZero("policies").(string))
+
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+}
+
+func (b *backend) pathRoleSecretIDNumUsesUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ if numUsesRaw, ok := data.GetOk("secret_id_num_uses"); ok {
+ role.SecretIDNumUses = numUsesRaw.(int)
+ if role.SecretIDNumUses < 0 {
+ return logical.ErrorResponse("secret_id_num_uses cannot be negative"), nil
+ }
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+ } else {
+ return logical.ErrorResponse("missing secret_id_num_uses"), nil
+ }
+}
+
+func (b *backend) pathRoleRoleIDUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ previousRoleID := role.RoleID
+ role.RoleID = data.Get("role_id").(string)
+ if role.RoleID == "" {
+ return logical.ErrorResponse("missing role_id"), nil
+ }
+
+ return nil, b.setRoleEntry(req.Storage, roleName, role, previousRoleID)
+}
+
+func (b *backend) pathRoleRoleIDRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if role == nil {
+ return nil, nil
+ } else {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "role_id": role.RoleID,
+ },
+ }, nil
+ }
+}
+
+func (b *backend) pathRoleSecretIDNumUsesRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if role == nil {
+ return nil, nil
+ } else {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "secret_id_num_uses": role.SecretIDNumUses,
+ },
+ }, nil
+ }
+}
+
+func (b *backend) pathRoleSecretIDNumUsesDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ role.SecretIDNumUses = data.GetDefaultOrZero("secret_id_num_uses").(int)
+
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+}
+
+func (b *backend) pathRoleSecretIDTTLUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ if secretIDTTLRaw, ok := data.GetOk("secret_id_ttl"); ok {
+ role.SecretIDTTL = time.Second * time.Duration(secretIDTTLRaw.(int))
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+ } else {
+ return logical.ErrorResponse("missing secret_id_ttl"), nil
+ }
+}
+
+func (b *backend) pathRoleSecretIDTTLRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if role == nil {
+ return nil, nil
+ } else {
+ role.SecretIDTTL /= time.Second
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "secret_id_ttl": role.SecretIDTTL,
+ },
+ }, nil
+ }
+}
+
+func (b *backend) pathRoleSecretIDTTLDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ role.SecretIDTTL = time.Second * time.Duration(data.GetDefaultOrZero("secret_id_ttl").(int))
+
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+}
+
+func (b *backend) pathRolePeriodUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ if periodRaw, ok := data.GetOk("period"); ok {
+ role.Period = time.Second * time.Duration(periodRaw.(int))
+ if role.Period > b.System().MaxLeaseTTL() {
+ return logical.ErrorResponse(fmt.Sprintf("'period' of '%s' is greater than the backend's maximum lease TTL of '%s'", role.Period.String(), b.System().MaxLeaseTTL().String())), nil
+ }
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+ } else {
+ return logical.ErrorResponse("missing period"), nil
+ }
+}
+
+func (b *backend) pathRolePeriodRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if role == nil {
+ return nil, nil
+ } else {
+ role.Period /= time.Second
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "period": role.Period,
+ },
+ }, nil
+ }
+}
+
+func (b *backend) pathRolePeriodDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ role.Period = time.Second * time.Duration(data.GetDefaultOrZero("period").(int))
+
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+}
+
+func (b *backend) pathRoleTokenNumUsesUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ if tokenNumUsesRaw, ok := data.GetOk("token_num_uses"); ok {
+ role.TokenNumUses = tokenNumUsesRaw.(int)
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+ } else {
+ return logical.ErrorResponse("missing token_num_uses"), nil
+ }
+}
+
+func (b *backend) pathRoleTokenNumUsesRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if role == nil {
+ return nil, nil
+ } else {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "token_num_uses": role.TokenNumUses,
+ },
+ }, nil
+ }
+}
+
+func (b *backend) pathRoleTokenNumUsesDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ role.TokenNumUses = data.GetDefaultOrZero("token_num_uses").(int)
+
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+}
+
+func (b *backend) pathRoleTokenTTLUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ if tokenTTLRaw, ok := data.GetOk("token_ttl"); ok {
+ role.TokenTTL = time.Second * time.Duration(tokenTTLRaw.(int))
+ if role.TokenMaxTTL > time.Duration(0) && role.TokenTTL > role.TokenMaxTTL {
+ return logical.ErrorResponse("token_ttl should not be greater than token_max_ttl"), nil
+ }
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+ } else {
+ return logical.ErrorResponse("missing token_ttl"), nil
+ }
+}
+
+func (b *backend) pathRoleTokenTTLRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if role == nil {
+ return nil, nil
+ } else {
+ role.TokenTTL /= time.Second
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "token_ttl": role.TokenTTL,
+ },
+ }, nil
+ }
+}
+
+func (b *backend) pathRoleTokenTTLDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ role.TokenTTL = time.Second * time.Duration(data.GetDefaultOrZero("token_ttl").(int))
+
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+}
+
+func (b *backend) pathRoleTokenMaxTTLUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ if tokenMaxTTLRaw, ok := data.GetOk("token_max_ttl"); ok {
+ role.TokenMaxTTL = time.Second * time.Duration(tokenMaxTTLRaw.(int))
+ if role.TokenMaxTTL > time.Duration(0) && role.TokenTTL > role.TokenMaxTTL {
+ return logical.ErrorResponse("token_max_ttl should be greater than or equal to token_ttl"), nil
+ }
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+ } else {
+ return logical.ErrorResponse("missing token_max_ttl"), nil
+ }
+}
+
+func (b *backend) pathRoleTokenMaxTTLRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if role == nil {
+ return nil, nil
+ } else {
+ role.TokenMaxTTL /= time.Second
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "token_max_ttl": role.TokenMaxTTL,
+ },
+ }, nil
+ }
+}
+
+func (b *backend) pathRoleTokenMaxTTLDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ lock := b.roleLock(roleName)
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ role.TokenMaxTTL = time.Second * time.Duration(data.GetDefaultOrZero("token_max_ttl").(int))
+
+ return nil, b.setRoleEntry(req.Storage, roleName, role, "")
+}
+
+func (b *backend) pathRoleSecretIDUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ secretID, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate SecretID:%s", err)
+ }
+ return b.handleRoleSecretIDCommon(req, data, secretID)
+}
+
+func (b *backend) pathRoleCustomSecretIDUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return b.handleRoleSecretIDCommon(req, data, data.Get("secret_id").(string))
+}
+
+func (b *backend) handleRoleSecretIDCommon(req *logical.Request, data *framework.FieldData, secretID string) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if secretID == "" {
+ return logical.ErrorResponse("missing secret_id"), nil
+ }
+
+ role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("role %s does not exist", roleName)), nil
+ }
+
+ if !role.BindSecretID {
+ return logical.ErrorResponse("bind_secret_id is not set on the role"), nil
+ }
+
+ cidrList := data.Get("cidr_list").(string)
+
+ // Validate the list of CIDR blocks
+ if cidrList != "" {
+ valid, err := cidrutil.ValidateCIDRListString(cidrList, ",")
+ if err != nil {
+ return nil, fmt.Errorf("failed to validate CIDR blocks: %q", err)
+ }
+ if !valid {
+ return logical.ErrorResponse("failed to validate CIDR blocks"), nil
+ }
+ }
+
+ // Parse the CIDR blocks into a slice
+ secretIDCIDRs := strutil.ParseDedupLowercaseAndSortStrings(cidrList, ",")
+
+ // Ensure that the CIDRs on the secret ID are a subset of the role's
+ if err := verifyCIDRRoleSecretIDSubset(secretIDCIDRs, role.BoundCIDRList); err != nil {
+ return nil, err
+ }
+
+ secretIDStorage := &secretIDStorageEntry{
+ SecretIDNumUses: role.SecretIDNumUses,
+ SecretIDTTL: role.SecretIDTTL,
+ Metadata: make(map[string]string),
+ CIDRList: secretIDCIDRs,
+ }
+
+ if err = strutil.ParseArbitraryKeyValues(data.Get("metadata").(string), secretIDStorage.Metadata, ","); err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("failed to parse metadata: %v", err)), nil
+ }
+
+ if secretIDStorage, err = b.registerSecretIDEntry(req.Storage, roleName, secretID, role.HMACKey, secretIDStorage); err != nil {
+ return nil, fmt.Errorf("failed to store SecretID: %s", err)
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "secret_id": secretID,
+ "secret_id_accessor": secretIDStorage.SecretIDAccessor,
+ },
+ }, nil
+}
+
+func (b *backend) roleIDLock(roleID string) *locksutil.LockEntry {
+ return locksutil.LockForKey(b.roleIDLocks, roleID)
+}
+
+func (b *backend) roleLock(roleName string) *locksutil.LockEntry {
+ return locksutil.LockForKey(b.roleLocks, roleName)
+}
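+
+// Illustrative note, not part of the upstream API: locksutil shards locks
+// by key, so concurrent updates to different roles (or RoleIDs) do not
+// contend on a single mutex. The guarded read-modify-write pattern used
+// throughout this file is, roughly:
+//
+//	lock := b.roleLock(roleName)
+//	lock.Lock()
+//	defer lock.Unlock()
+//	// mutate the role entry, then persist it via setRoleEntry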
+
+// setRoleIDEntry creates a storage entry that maps RoleID to Role
+func (b *backend) setRoleIDEntry(s logical.Storage, roleID string, roleIDEntry *roleIDStorageEntry) error {
+ lock := b.roleIDLock(roleID)
+ lock.Lock()
+ defer lock.Unlock()
+
+ entryIndex := "role_id/" + b.salt.SaltID(roleID)
+
+ entry, err := logical.StorageEntryJSON(entryIndex, roleIDEntry)
+ if err != nil {
+ return err
+ }
+ if err = s.Put(entry); err != nil {
+ return err
+ }
+ return nil
+}
+
+// roleIDEntry is used to read the storage entry that maps RoleID to Role
+func (b *backend) roleIDEntry(s logical.Storage, roleID string) (*roleIDStorageEntry, error) {
+ if roleID == "" {
+ return nil, fmt.Errorf("missing roleID")
+ }
+
+ lock := b.roleIDLock(roleID)
+ lock.RLock()
+ defer lock.RUnlock()
+
+ var result roleIDStorageEntry
+
+ entryIndex := "role_id/" + b.salt.SaltID(roleID)
+
+ if entry, err := s.Get(entryIndex); err != nil {
+ return nil, err
+ } else if entry == nil {
+ return nil, nil
+ } else if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+// roleIDEntryDelete is used to remove the secondary index that maps the
+// RoleID to the Role itself.
+func (b *backend) roleIDEntryDelete(s logical.Storage, roleID string) error {
+ if roleID == "" {
+ return fmt.Errorf("missing roleID")
+ }
+
+ lock := b.roleIDLock(roleID)
+ lock.Lock()
+ defer lock.Unlock()
+
+ entryIndex := "role_id/" + b.salt.SaltID(roleID)
+
+ return s.Delete(entryIndex)
+}
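+
+// For illustration only: all three helpers above address the secondary
+// index through the same salted key, so the plaintext RoleID never appears
+// in storage paths:
+//
+//	entryIndex := "role_id/" + b.salt.SaltID(roleID)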
+
+var roleHelp = map[string][2]string{
+ "role-list": {
+ "Lists all the roles registered with the backend.",
+ "The list will contain the names of the roles.",
+ },
+ "role": {
+ "Register an role with the backend.",
+ `A role can represent a service, a machine or anything that can be IDed.
+The set of policies on the role defines access to the role, meaning, any
+Vault token with a policy set that is a superset of the policies on the
+role registered here will have access to the role. If a SecretID is desired
+to be generated against only this specific role, it can be done via
+'role//secret-id' and 'role//custom-secret-id' endpoints.
+The properties of the SecretID created against the role and the properties
+of the token issued with the SecretID generated againt the role, can be
+configured using the parameters of this endpoint.`,
+ },
+ "role-bind-secret-id": {
+ "Impose secret_id to be presented during login using this role.",
+ `By setting this to 'true', during login the parameter 'secret_id' becomes a mandatory argument.
+The value of 'secret_id' can be retrieved using 'role//secret-id' endpoint.`,
+ },
+ "role-bound-cidr-list": {
+ `Comma separated list of CIDR blocks, if set, specifies blocks of IP
+addresses which can perform the login operation`,
+ `During login, the IP address of the client will be checked to see if it
+belongs to the CIDR blocks specified. If CIDR blocks were set and if the
+IP is not encompassed by it, login fails`,
+ },
+ "role-policies": {
+ "Policies of the role.",
+ `A comma-delimited set of Vault policies that defines access to the role.
+All the Vault tokens with policies that encompass the policy set
+defined on the role can access the role.`,
+ },
+ "role-secret-id-num-uses": {
+ "Use limit of the SecretID generated against the role.",
+ `If the SecretIDs are generated/assigned against the role using the
+'role//secret-id' or 'role//custom-secret-id' endpoints,
+then the number of times that SecretID can access the role is defined by
+this option.`,
+ },
+ "role-secret-id-ttl": {
+ `Duration in seconds, representing the lifetime of the SecretIDs
+that are generated against the role using 'role//secret-id' or
+'role//custom-secret-id' endpoints.`,
+ ``,
+ },
+ "role-secret-id-lookup": {
+ "Read the properties of an issued secret_id",
+ `This endpoint is used to read the properties of a secret_id associated to a
+role.`},
+ "role-secret-id-destroy": {
+ "Invalidate an issued secret_id",
+ `This endpoint is used to delete the properties of a secret_id associated to a
+role.`},
+ "role-secret-id-accessor-lookup": {
+ "Read an issued secret_id, using its accessor",
+ `This is particularly useful to lookup the non-expiring 'secret_id's.
+The list operation on the 'role//secret-id' endpoint will return
+the 'secret_id_accessor's. This endpoint can be used to read the properties
+of the secret. If the 'secret_id_num_uses' field in the response is 0, it
+represents a non-expiring 'secret_id'.`,
+ },
+ "role-secret-id-accessor-destroy": {
+ "Delete an issued secret_id, using its accessor",
+ `This is particularly useful to clean-up the non-expiring 'secret_id's.
+The list operation on the 'role//secret-id' endpoint will return
+the 'secret_id_accessor's. This endpoint can be used to read the properties
+of the secret. If the 'secret_id_num_uses' field in the response is 0, it
+represents a non-expiring 'secret_id'.`,
+ },
+ "role-token-num-uses": {
+ "Number of times issued tokens can be used",
+ `By default, this will be set to zero, indicating that the issued
+tokens can be used any number of times.`,
+ },
+ "role-token-ttl": {
+ `Duration in seconds, the lifetime of the token issued by using the SecretID that
+is generated against this role, before which the token needs to be renewed.`,
+ `If SecretIDs are generated against the role, using 'role//secret-id' or the
+'role//custom-secret-id' endpoints, and if those SecretIDs are used
+to perform the login operation, then the value of 'token-ttl' defines the
+lifetime of the token issued, before which the token needs to be renewed.`,
+ },
+ "role-token-max-ttl": {
+ `Duration in seconds, the maximum lifetime of the tokens issued by using
+the SecretIDs that were generated against this role, after which the
+tokens are not allowed to be renewed.`,
+ `If SecretIDs are generated against the role using 'role//secret-id'
+or the 'role//custom-secret-id' endpoints, and if those SecretIDs
+are used to perform the login operation, then the value of 'token-max-ttl'
+defines the maximum lifetime of the tokens issued, after which the tokens
+cannot be renewed. A reauthentication is required after this duration.
+This value will be capped by the backend mount's maximum TTL value.`,
+ },
+ "role-id": {
+ "Returns the 'role_id' of the role.",
+ `If login is performed using a role, then its 'role_id' should be presented
+as a credential during the login. This 'role_id' can be retrieved using
+this endpoint.`,
+ },
+ "role-secret-id": {
+ "Generate a SecretID against this role.",
+ `The SecretID generated using this endpoint will be scoped to access
+just this role and no other. The properties of this SecretID will be
+based on the options set on the role. It will expire after a period
+defined by the 'secret_id_ttl' option on the role and/or the backend
+mount's maximum TTL value.`,
+ },
+ "role-custom-secret-id": {
+ "Assign a SecretID of choice against the role.",
+ `This option is not recommended unless there is a specific need
+to do so. This will assign a client-supplied SecretID to be used to access
+the role. This SecretID will behave similarly to the SecretIDs generated by
+the backend. The properties of this SecretID will be based on the options
+set on the role. It will expire after a period defined by the 'secret_id_ttl'
+option on the role and/or the backend mount's maximum TTL value.`,
+ },
+ "role-period": {
+ "Updates the value of 'period' on the role",
+ `If set, indicates that the token generated using this role
+should never expire. The token should be renewed within the
+duration specified by this value. The renewal duration will
+be fixed. If the Period in the role is modified, the token
+will pick up the new value during its next renewal.`,
+ },
+}
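+
+// A non-authoritative usage sketch of the endpoints documented above,
+// assuming the backend is mounted at the default 'auth/approle' path:
+//
+//	vault write auth/approle/role/web-servers policies=web secret_id_ttl=600
+//	vault read auth/approle/role/web-servers/role-id
+//	vault write -f auth/approle/role/web-servers/secret-id
+//	vault write auth/approle/login role_id=<role_id> secret_id=<secret_id>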
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go
new file mode 100644
index 0000000..a40cbe1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go
@@ -0,0 +1,1067 @@
+package approle
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/mitchellh/mapstructure"
+)
+
+func TestAppRole_CIDRSubset(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, storage := createBackendWithStorage(t)
+
+ roleData := map[string]interface{}{
+ "role_id": "role-id-123",
+ "policies": "a,b",
+ "bound_cidr_list": "127.0.0.1/24",
+ }
+
+ roleReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/testrole1",
+ Storage: storage,
+ Data: roleData,
+ }
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v resp: %#v", err, resp)
+ }
+
+ secretIDData := map[string]interface{}{
+ "cidr_list": "127.0.0.1/16",
+ }
+ secretIDReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "role/testrole1/secret-id",
+ Data: secretIDData,
+ }
+
+ resp, err = b.HandleRequest(secretIDReq)
+ if resp != nil {
+ t.Fatalf("expected a nil response, got: %#v", resp)
+ }
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+
+ roleData["bound_cidr_list"] = "192.168.27.29/16,172.245.30.40/24,10.20.30.40/30"
+ roleReq.Operation = logical.UpdateOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v resp: %#v", err, resp)
+ }
+
+ secretIDData["cidr_list"] = "192.168.27.29/20,172.245.30.40/25,10.20.30.40/32"
+ resp, err = b.HandleRequest(secretIDReq)
+ if resp != nil && resp.IsError() {
+ t.Fatalf("resp: %#v", resp)
+ }
+}
+
+func TestAppRole_RoleConstraints(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ roleData := map[string]interface{}{
+ "role_id": "role-id-123",
+ "policies": "a,b",
+ }
+
+ roleReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/testrole1",
+ Storage: storage,
+ Data: roleData,
+ }
+
+ // Set bind_secret_id, which is enabled by default
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Set bound_cidr_list alone by explicitly disabling bind_secret_id
+ roleReq.Operation = logical.UpdateOperation
+ roleData["bind_secret_id"] = false
+ roleData["bound_cidr_list"] = "0.0.0.0/0"
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Remove both constraints
+ roleReq.Operation = logical.UpdateOperation
+ roleData["bound_cidr_list"] = ""
+ roleData["bind_secret_id"] = false
+ resp, err = b.HandleRequest(roleReq)
+ if resp != nil && resp.IsError() {
+ t.Fatalf("err:%v, resp:%#v", err, resp)
+ }
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+}
+
+func TestAppRole_RoleIDUpdate(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ roleData := map[string]interface{}{
+ "role_id": "role-id-123",
+ "policies": "a,b",
+ "secret_id_num_uses": 10,
+ "secret_id_ttl": 300,
+ "token_ttl": 400,
+ "token_max_ttl": 500,
+ }
+ roleReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/testrole1",
+ Storage: storage,
+ Data: roleData,
+ }
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleIDUpdateReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "role/testrole1/role-id",
+ Storage: storage,
+ Data: map[string]interface{}{
+ "role_id": "customroleid",
+ },
+ }
+ resp, err = b.HandleRequest(roleIDUpdateReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ secretIDReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "role/testrole1/secret-id",
+ }
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ secretID := resp.Data["secret_id"].(string)
+
+ loginData := map[string]interface{}{
+ "role_id": "customroleid",
+ "secret_id": secretID,
+ }
+ loginReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Storage: storage,
+ Data: loginData,
+ Connection: &logical.Connection{
+ RemoteAddr: "127.0.0.1",
+ },
+ }
+ resp, err = b.HandleRequest(loginReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Auth == nil {
+ t.Fatalf("expected a non-nil auth object in the response")
+ }
+}
+
+func TestAppRole_RoleIDUniqueness(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ roleData := map[string]interface{}{
+ "role_id": "role-id-123",
+ "policies": "a,b",
+ "secret_id_num_uses": 10,
+ "secret_id_ttl": 300,
+ "token_ttl": 400,
+ "token_max_ttl": 500,
+ }
+ roleReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/testrole1",
+ Storage: storage,
+ Data: roleData,
+ }
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Path = "role/testrole2"
+ resp, err = b.HandleRequest(roleReq)
+ if err == nil && !(resp != nil && resp.IsError()) {
+ t.Fatalf("expected an error: got resp:%#v", resp)
+ }
+
+ roleData["role_id"] = "role-id-456"
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.UpdateOperation
+ roleData["role_id"] = "role-id-123"
+ resp, err = b.HandleRequest(roleReq)
+ if err == nil && !(resp != nil && resp.IsError()) {
+ t.Fatalf("expected an error: got resp:%#v", resp)
+ }
+
+ roleReq.Path = "role/testrole1"
+ roleData["role_id"] = "role-id-456"
+ resp, err = b.HandleRequest(roleReq)
+ if err == nil && !(resp != nil && resp.IsError()) {
+ t.Fatalf("expected an error: got resp:%#v", resp)
+ }
+
+ roleIDData := map[string]interface{}{
+ "role_id": "role-id-456",
+ }
+ roleIDReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "role/testrole1/role-id",
+ Storage: storage,
+ Data: roleIDData,
+ }
+ resp, err = b.HandleRequest(roleIDReq)
+ if err == nil && !(resp != nil && resp.IsError()) {
+ t.Fatalf("expected an error: got resp:%#v", resp)
+ }
+
+ roleIDData["role_id"] = "role-id-123"
+ roleIDReq.Path = "role/testrole2/role-id"
+ resp, err = b.HandleRequest(roleIDReq)
+ if err == nil && !(resp != nil && resp.IsError()) {
+ t.Fatalf("expected an error: got resp:%#v", resp)
+ }
+
+ roleIDData["role_id"] = "role-id-2000"
+ resp, err = b.HandleRequest(roleIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleIDData["role_id"] = "role-id-1000"
+ roleIDReq.Path = "role/testrole1/role-id"
+ resp, err = b.HandleRequest(roleIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+}
+
+func TestAppRole_RoleDeleteSecretID(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ createRole(t, b, storage, "role1", "a,b")
+ secretIDReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "role/role1/secret-id",
+ }
+ // Create 3 secrets on the role
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ listReq := &logical.Request{
+ Operation: logical.ListOperation,
+ Storage: storage,
+ Path: "role/role1/secret-id",
+ }
+ resp, err = b.HandleRequest(listReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ secretIDAccessors := resp.Data["keys"].([]string)
+ if len(secretIDAccessors) != 3 {
+ t.Fatalf("bad: len of secretIDAccessors: expected:3 actual:%d", len(secretIDAccessors))
+ }
+
+ roleReq := &logical.Request{
+ Operation: logical.DeleteOperation,
+ Storage: storage,
+ Path: "role/role1",
+ }
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ resp, err = b.HandleRequest(listReq)
+ if err != nil || resp == nil || !resp.IsError() {
+ t.Fatalf("expected an error. err:%v resp:%#v", err, resp)
+ }
+}
+
+func TestAppRole_RoleSecretIDReadDelete(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ createRole(t, b, storage, "role1", "a,b")
+ secretIDCreateReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "role/role1/secret-id",
+ }
+ resp, err = b.HandleRequest(secretIDCreateReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ secretID := resp.Data["secret_id"].(string)
+ if secretID == "" {
+ t.Fatal("expected non empty secret ID")
+ }
+
+ secretIDReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "role/role1/secret-id/lookup",
+ Data: map[string]interface{}{
+ "secret_id": secretID,
+ },
+ }
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ if resp.Data == nil {
+ t.Fatal(err)
+ }
+
+ deleteSecretIDReq := &logical.Request{
+ Operation: logical.DeleteOperation,
+ Storage: storage,
+ Path: "role/role1/secret-id/destroy",
+ Data: map[string]interface{}{
+ "secret_id": secretID,
+ },
+ }
+ resp, err = b.HandleRequest(deleteSecretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ resp, err = b.HandleRequest(secretIDReq)
+ if resp != nil && resp.IsError() {
+ t.Fatalf("error response:%#v", resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestAppRole_RoleSecretIDAccessorReadDelete(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ createRole(t, b, storage, "role1", "a,b")
+ secretIDReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "role/role1/secret-id",
+ }
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ listReq := &logical.Request{
+ Operation: logical.ListOperation,
+ Storage: storage,
+ Path: "role/role1/secret-id",
+ }
+ resp, err = b.HandleRequest(listReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ hmacSecretID := resp.Data["keys"].([]string)[0]
+
+ hmacReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "role/role1/secret-id-accessor/lookup",
+ Data: map[string]interface{}{
+ "secret_id_accessor": hmacSecretID,
+ },
+ }
+ resp, err = b.HandleRequest(hmacReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ if resp.Data == nil {
+ t.Fatal(err)
+ }
+
+ hmacReq.Path = "role/role1/secret-id-accessor/destroy"
+ resp, err = b.HandleRequest(hmacReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ hmacReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(hmacReq)
+ if resp != nil && resp.IsError() {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+}
+
+func TestAppRoleRoleListSecretID(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ createRole(t, b, storage, "role1", "a,b")
+
+ secretIDReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "role/role1/secret-id",
+ }
+ // Create 5 'secret_id's
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ listReq := &logical.Request{
+ Operation: logical.ListOperation,
+ Storage: storage,
+ Path: "role/role1/secret-id/",
+ }
+ resp, err = b.HandleRequest(listReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ secrets := resp.Data["keys"].([]string)
+ if len(secrets) != 5 {
+ t.Fatalf("bad: len of secrets: expected:5 actual:%d", len(secrets))
+ }
+}
+
+func TestAppRole_RoleList(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ createRole(t, b, storage, "role1", "a,b")
+ createRole(t, b, storage, "role2", "c,d")
+ createRole(t, b, storage, "role3", "e,f")
+ createRole(t, b, storage, "role4", "g,h")
+ createRole(t, b, storage, "role5", "i,j")
+
+ listReq := &logical.Request{
+ Operation: logical.ListOperation,
+ Path: "role",
+ Storage: storage,
+ }
+ resp, err = b.HandleRequest(listReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ actual := resp.Data["keys"].([]string)
+ expected := []string{"role1", "role2", "role3", "role4", "role5"}
+ if !policyutil.EquivalentPolicies(actual, expected) {
+ t.Fatalf("bad: listed roles: expected:%s\nactual:%s", expected, actual)
+ }
+}
+
+func TestAppRole_RoleSecretID(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ roleData := map[string]interface{}{
+ "policies": "p,q,r,s",
+ "secret_id_num_uses": 10,
+ "secret_id_ttl": 300,
+ "token_ttl": 400,
+ "token_max_ttl": 500,
+ }
+ roleReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/role1",
+ Storage: storage,
+ Data: roleData,
+ }
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleSecretIDReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "role/role1/secret-id",
+ Storage: storage,
+ }
+ resp, err = b.HandleRequest(roleSecretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["secret_id"].(string) == "" {
+ t.Fatalf("failed to generate secret_id")
+ }
+
+ roleSecretIDReq.Path = "role/role1/custom-secret-id"
+ roleCustomSecretIDData := map[string]interface{}{
+ "secret_id": "abcd123",
+ }
+ roleSecretIDReq.Data = roleCustomSecretIDData
+ roleSecretIDReq.Operation = logical.UpdateOperation
+ resp, err = b.HandleRequest(roleSecretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["secret_id"] != "abcd123" {
+ t.Fatalf("failed to set specific secret_id to role")
+ }
+}
+
+func TestAppRole_RoleCRUD(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ roleData := map[string]interface{}{
+ "policies": "p,q,r,s",
+ "secret_id_num_uses": 10,
+ "secret_id_ttl": 300,
+ "token_ttl": 400,
+ "token_max_ttl": 500,
+ "token_num_uses": 600,
+ "bound_cidr_list": "127.0.0.1/32,127.0.0.1/16",
+ }
+ roleReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/role1",
+ Storage: storage,
+ Data: roleData,
+ }
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ expected := map[string]interface{}{
+ "bind_secret_id": true,
+ "policies": []string{"default", "p", "q", "r", "s"},
+ "secret_id_num_uses": 10,
+ "secret_id_ttl": 300,
+ "token_ttl": 400,
+ "token_max_ttl": 500,
+ "token_num_uses": 600,
+ "bound_cidr_list": "127.0.0.1/32,127.0.0.1/16",
+ }
+ var expectedStruct roleStorageEntry
+ err = mapstructure.Decode(expected, &expectedStruct)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var actualStruct roleStorageEntry
+ err = mapstructure.Decode(resp.Data, &actualStruct)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedStruct.RoleID = actualStruct.RoleID
+ if !reflect.DeepEqual(expectedStruct, actualStruct) {
+ t.Fatalf("bad:\nexpected:%#v\nactual:%#v\n", expectedStruct, actualStruct)
+ }
+
+ roleData = map[string]interface{}{
+ "role_id": "test_role_id",
+ "policies": "a,b,c,d",
+ "secret_id_num_uses": 100,
+ "secret_id_ttl": 3000,
+ "token_ttl": 4000,
+ "token_max_ttl": 5000,
+ }
+ roleReq.Data = roleData
+ roleReq.Operation = logical.UpdateOperation
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ expected = map[string]interface{}{
+ "policies": []string{"a", "b", "c", "d", "default"},
+ "secret_id_num_uses": 100,
+ "secret_id_ttl": 3000,
+ "token_ttl": 4000,
+ "token_max_ttl": 5000,
+ }
+ err = mapstructure.Decode(expected, &expectedStruct)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = mapstructure.Decode(resp.Data, &actualStruct)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(expectedStruct, actualStruct) {
+ t.Fatalf("bad:\nexpected:%#v\nactual:%#v\n", expectedStruct, actualStruct)
+ }
+
+ // RU for role_id field
+ roleReq.Path = "role/role1/role-id"
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ if resp.Data["role_id"].(string) != "test_role_id" {
+ t.Fatalf("bad: role_id: expected:test_role_id actual:%s\n", resp.Data["role_id"].(string))
+ }
+
+ roleReq.Data = map[string]interface{}{"role_id": "custom_role_id"}
+ roleReq.Operation = logical.UpdateOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ if resp.Data["role_id"].(string) != "custom_role_id" {
+ t.Fatalf("bad: role_id: expected:custom_role_id actual:%s\n", resp.Data["role_id"].(string))
+ }
+
+ // RUD for bind_secret_id field
+ roleReq.Path = "role/role1/bind-secret-id"
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Data = map[string]interface{}{"bind_secret_id": false}
+ roleReq.Operation = logical.UpdateOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["bind_secret_id"].(bool) {
+ t.Fatalf("bad: bind_secret_id: expected:false actual:%t\n", resp.Data["bind_secret_id"].(bool))
+ }
+ roleReq.Operation = logical.DeleteOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if !resp.Data["bind_secret_id"].(bool) {
+ t.Fatalf("expected the default value of 'true' to be set")
+ }
+
+ // RUD for policies field
+ roleReq.Path = "role/role1/policies"
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Data = map[string]interface{}{"policies": "a1,b1,c1,d1"}
+ roleReq.Operation = logical.UpdateOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if !reflect.DeepEqual(resp.Data["policies"].([]string), []string{"a1", "b1", "c1", "d1", "default"}) {
+ t.Fatalf("bad: policies: actual:%s\n", resp.Data["policies"].([]string))
+ }
+ roleReq.Operation = logical.DeleteOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ expectedPolicies := []string{"default"}
+ actualPolicies := resp.Data["policies"].([]string)
+ if !policyutil.EquivalentPolicies(expectedPolicies, actualPolicies) {
+ t.Fatalf("bad: policies: expected:%s actual:%s", expectedPolicies, actualPolicies)
+ }
+
+ // RUD for secret-id-num-uses field
+ roleReq.Path = "role/role1/secret-id-num-uses"
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Data = map[string]interface{}{"secret_id_num_uses": 200}
+ roleReq.Operation = logical.UpdateOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["secret_id_num_uses"].(int) != 200 {
+ t.Fatalf("bad: secret_id_num_uses: expected:200 actual:%d\n", resp.Data["secret_id_num_uses"].(int))
+ }
+ roleReq.Operation = logical.DeleteOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["secret_id_num_uses"].(int) != 0 {
+ t.Fatalf("expected value to be reset")
+ }
+
+ // RUD for secret_id_ttl field
+ roleReq.Path = "role/role1/secret-id-ttl"
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Data = map[string]interface{}{"secret_id_ttl": 3001}
+ roleReq.Operation = logical.UpdateOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["secret_id_ttl"].(time.Duration) != 3001 {
+ t.Fatalf("bad: secret_id_ttl: expected:3001 actual:%d\n", resp.Data["secret_id_ttl"].(time.Duration))
+ }
+ roleReq.Operation = logical.DeleteOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["secret_id_ttl"].(time.Duration) != 0 {
+ t.Fatalf("expected value to be reset")
+ }
+
+ // RUD for token-num-uses field
+ roleReq.Path = "role/role1/token-num-uses"
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ if resp.Data["token_num_uses"].(int) != 600 {
+ t.Fatalf("bad: token_num_uses: expected:600 actual:%d\n", resp.Data["token_num_uses"].(int))
+ }
+
+ roleReq.Data = map[string]interface{}{"token_num_uses": 60}
+ roleReq.Operation = logical.UpdateOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["token_num_uses"].(int) != 60 {
+ t.Fatalf("bad: token_num_uses: expected:60 actual:%d\n", resp.Data["token_num_uses"].(int))
+ }
+
+ roleReq.Operation = logical.DeleteOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["token_num_uses"].(int) != 0 {
+ t.Fatalf("expected value to be reset")
+ }
+
+ // RUD for 'period' field
+ roleReq.Path = "role/role1/period"
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Data = map[string]interface{}{"period": 9001}
+ roleReq.Operation = logical.UpdateOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["period"].(time.Duration) != 9001 {
+ t.Fatalf("bad: period: expected:9001 actual:%d\n", resp.Data["9001"].(time.Duration))
+ }
+ roleReq.Operation = logical.DeleteOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["period"].(time.Duration) != 0 {
+ t.Fatalf("expected value to be reset")
+ }
+
+ // RUD for token_ttl field
+ roleReq.Path = "role/role1/token-ttl"
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Data = map[string]interface{}{"token_ttl": 4001}
+ roleReq.Operation = logical.UpdateOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["token_ttl"].(time.Duration) != 4001 {
+ t.Fatalf("bad: token_ttl: expected:4001 actual:%d\n", resp.Data["token_ttl"].(time.Duration))
+ }
+ roleReq.Operation = logical.DeleteOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["token_ttl"].(time.Duration) != 0 {
+ t.Fatalf("expected value to be reset")
+ }
+
+ // RUD for token_max_ttl field
+ roleReq.Path = "role/role1/token-max-ttl"
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Data = map[string]interface{}{"token_max_ttl": 5001}
+ roleReq.Operation = logical.UpdateOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["token_max_ttl"].(time.Duration) != 5001 {
+ t.Fatalf("bad: token_max_ttl: expected:5001 actual:%d\n", resp.Data["token_max_ttl"].(time.Duration))
+ }
+ roleReq.Operation = logical.DeleteOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["token_max_ttl"].(time.Duration) != 0 {
+ t.Fatalf("expected value to be reset")
+ }
+
+ // Delete test for role
+ roleReq.Path = "role/role1"
+ roleReq.Operation = logical.DeleteOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp != nil {
+ t.Fatalf("expected a nil response")
+ }
+}
+
+func createRole(t *testing.T, b *backend, s logical.Storage, roleName, policies string) {
+ roleData := map[string]interface{}{
+ "policies": policies,
+ "secret_id_num_uses": 10,
+ "secret_id_ttl": 300,
+ "token_ttl": 400,
+ "token_max_ttl": 500,
+ }
+ roleReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/" + roleName,
+ Storage: s,
+ Data: roleData,
+ }
+
+ resp, err := b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_tidy_user_id.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_tidy_user_id.go
new file mode 100644
index 0000000..4b06554
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_tidy_user_id.go
@@ -0,0 +1,99 @@
+package approle
+
+import (
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathTidySecretID(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "tidy/secret-id$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathTidySecretIDUpdate,
+ },
+
+ HelpSynopsis: pathTidySecretIDSyn,
+ HelpDescription: pathTidySecretIDDesc,
+ }
+}
+
+// tidySecretID is used to delete entries in the whitelist that are expired.
+func (b *backend) tidySecretID(s logical.Storage) error {
+ grabbed := atomic.CompareAndSwapUint32(&b.tidySecretIDCASGuard, 0, 1)
+ if grabbed {
+ defer atomic.StoreUint32(&b.tidySecretIDCASGuard, 0)
+ } else {
+ return fmt.Errorf("SecretID tidy operation already running")
+ }
+
+ roleNameHMACs, err := s.List("secret_id/")
+ if err != nil {
+ return err
+ }
+
+ var result error
+ for _, roleNameHMAC := range roleNameHMACs {
+ // roleNameHMAC will already have a '/' suffix. Don't append another one.
+ secretIDHMACs, err := s.List(fmt.Sprintf("secret_id/%s", roleNameHMAC))
+ if err != nil {
+ return err
+ }
+ for _, secretIDHMAC := range secretIDHMACs {
+ // In order to avoid lock swapping in case there is a need to delete,
+ // grab the write lock.
+ lock := b.secretIDLock(secretIDHMAC)
+ lock.Lock()
+ // roleNameHMAC will already have a '/' suffix. Don't append another one.
+ entryIndex := fmt.Sprintf("secret_id/%s%s", roleNameHMAC, secretIDHMAC)
+ secretIDEntry, err := s.Get(entryIndex)
+ if err != nil {
+ lock.Unlock()
+ return fmt.Errorf("error fetching SecretID %s: %s", secretIDHMAC, err)
+ }
+
+ if secretIDEntry == nil {
+ result = multierror.Append(result, fmt.Errorf("entry for SecretID %s is nil", secretIDHMAC))
+ lock.Unlock()
+ continue
+ }
+
+ if len(secretIDEntry.Value) == 0 {
+ lock.Unlock()
+ return fmt.Errorf("found entry for SecretID %s but actual SecretID is empty", secretIDHMAC)
+ }
+
+ // Use a distinct name here to avoid shadowing the 'result' multierror
+ // accumulated above.
+ var entry secretIDStorageEntry
+ if err := secretIDEntry.DecodeJSON(&entry); err != nil {
+ lock.Unlock()
+ return err
+ }
+
+ // ExpirationTime not being set indicates a non-expiring SecretID
+ if !entry.ExpirationTime.IsZero() && time.Now().After(entry.ExpirationTime) {
+ if err := s.Delete(entryIndex); err != nil {
+ lock.Unlock()
+ return fmt.Errorf("error deleting SecretID %s from storage: %s", secretIDHMAC, err)
+ }
+ }
+ lock.Unlock()
+ }
+ }
+ return result
+}
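+
+// The CompareAndSwap guard above is the usual Go idiom for allowing at
+// most one in-flight maintenance run; a minimal standalone sketch:
+//
+//	var guard uint32
+//	if !atomic.CompareAndSwapUint32(&guard, 0, 1) {
+//		return fmt.Errorf("tidy already running")
+//	}
+//	defer atomic.StoreUint32(&guard, 0)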
+
+// pathTidySecretIDUpdate is used to delete the expired SecretID entries
+func (b *backend) pathTidySecretIDUpdate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return nil, b.tidySecretID(req.Storage)
+}
+
+const pathTidySecretIDSyn = "Trigger the clean-up of expired SecretID entries."
+const pathTidySecretIDDesc = `SecretIDs will have an expiration time attached to them. The periodic function
+of the backend will look for expired entries and delete them. This happens once a minute. Invoking
+this endpoint will trigger the clean-up action, without waiting for the backend's periodic function.`
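+
+// A hedged usage sketch: with the backend mounted at the default
+// 'auth/approle' path, the clean-up can be triggered ahead of the periodic
+// function via:
+//
+//	vault write -f auth/approle/tidy/secret-id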
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go
new file mode 100644
index 0000000..db668a8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go
@@ -0,0 +1,562 @@
+package approle
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/cidrutil"
+ "github.com/hashicorp/vault/helper/locksutil"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// secretIDStorageEntry represents the information persisted in storage
+// when a SecretID is created. The structure of the SecretID storage
+// entry is the same for all types of SecretIDs generated.
+type secretIDStorageEntry struct {
+ // Accessor for the SecretID. It is a random UUID serving as
+ // a secondary index for the SecretID. This uniquely identifies
+ // the SecretID it belongs to, and hence can be used for listing
+ // and deleting SecretIDs. Accessors cannot be used as valid
+ // SecretIDs during login.
+ SecretIDAccessor string `json:"secret_id_accessor" structs:"secret_id_accessor" mapstructure:"secret_id_accessor"`
+
+ // Number of times this SecretID can be used to perform the login
+ // operation
+ SecretIDNumUses int `json:"secret_id_num_uses" structs:"secret_id_num_uses" mapstructure:"secret_id_num_uses"`
+
+ // Duration after which this SecretID should expire. This is capped by
+ // the backend mount's max TTL value.
+ SecretIDTTL time.Duration `json:"secret_id_ttl" structs:"secret_id_ttl" mapstructure:"secret_id_ttl"`
+
+ // The time when the SecretID was created
+ CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"`
+
+ // The time when the SecretID becomes eligible for the tidy operation.
+ // Tidying is performed by the PeriodicFunc of the backend, which runs
+ // about once a minute.
+ ExpirationTime time.Time `json:"expiration_time" structs:"expiration_time" mapstructure:"expiration_time"`
+
+ // The time representing the last time this storage entry was modified
+ LastUpdatedTime time.Time `json:"last_updated_time" structs:"last_updated_time" mapstructure:"last_updated_time"`
+
+ // Metadata that belongs to the SecretID
+ Metadata map[string]string `json:"metadata" structs:"metadata" mapstructure:"metadata"`
+
+ // CIDRList is a set of CIDR blocks that impose source address
+ // restrictions on the usage of SecretID
+ CIDRList []string `json:"cidr_list" structs:"cidr_list" mapstructure:"cidr_list"`
+
+ // This is a deprecated field
+ SecretIDNumUsesDeprecated int `json:"SecretIDNumUses" structs:"SecretIDNumUses" mapstructure:"SecretIDNumUses"`
+}
+
+// Represents the payload of the storage entry of the accessor that maps to a
+// unique SecretID. Note that SecretIDs should never be stored in plaintext
+// anywhere in the backend. SecretIDHMAC will be used as an index to fetch the
+// properties of the SecretID and to delete the SecretID.
+type secretIDAccessorStorageEntry struct {
+ // Hash of the SecretID which can be used to find the storage index at
+ // which the properties of the SecretID are stored.
+ SecretIDHMAC string `json:"secret_id_hmac" structs:"secret_id_hmac" mapstructure:"secret_id_hmac"`
+}
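+
+// Illustrative sketch of how the storage index for a SecretID entry is
+// derived (see validateBindSecretID below): both the role name and the
+// SecretID are HMAC'ed with the role's HMACKey, so neither appears in
+// plaintext in storage paths:
+//
+//	roleNameHMAC, _ := createHMAC(role.HMACKey, roleName)
+//	secretIDHMAC, _ := createHMAC(role.HMACKey, secretID)
+//	entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)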
+
+// Checks if the Role represented by the RoleID still exists
+func (b *backend) validateRoleID(s logical.Storage, roleID string) (*roleStorageEntry, string, error) {
+ // Look for the storage entry that maps the roleID to role
+ roleIDIndex, err := b.roleIDEntry(s, roleID)
+ if err != nil {
+ return nil, "", err
+ }
+ if roleIDIndex == nil {
+ return nil, "", fmt.Errorf("failed to find secondary index for role_id %q\n", roleID)
+ }
+
+ role, err := b.roleEntry(s, roleIDIndex.Name)
+ if err != nil {
+ return nil, "", err
+ }
+ if role == nil {
+ return nil, "", fmt.Errorf("role %q referred by the SecretID does not exist", roleIDIndex.Name)
+ }
+
+ return role, roleIDIndex.Name, nil
+}
+
+// Validates the supplied RoleID and SecretID
+func (b *backend) validateCredentials(req *logical.Request, data *framework.FieldData) (*roleStorageEntry, string, map[string]string, error) {
+ var metadata map[string]string
+ // RoleID must be supplied during every login
+ roleID := strings.TrimSpace(data.Get("role_id").(string))
+ if roleID == "" {
+ return nil, "", metadata, fmt.Errorf("missing role_id")
+ }
+
+ // Validate the RoleID and get the Role entry
+ role, roleName, err := b.validateRoleID(req.Storage, roleID)
+ if err != nil {
+ return nil, "", metadata, err
+ }
+ if role == nil || roleName == "" {
+ return nil, "", metadata, fmt.Errorf("failed to validate role_id")
+ }
+
+ // Calculate the TTL boundaries since this reflects the properties of the token issued
+ if role.TokenTTL, role.TokenMaxTTL, err = b.SanitizeTTL(role.TokenTTL, role.TokenMaxTTL); err != nil {
+ return nil, "", metadata, err
+ }
+
+ if role.BindSecretID {
+ // If 'bind_secret_id' was set on role, look for the field 'secret_id'
+ // to be specified and validate it.
+ secretID := strings.TrimSpace(data.Get("secret_id").(string))
+ if secretID == "" {
+ return nil, "", metadata, fmt.Errorf("missing secret_id")
+ }
+
+ // Check if the SecretID supplied is valid. If use limit was specified
+ // on the SecretID, it will be decremented in this call.
+ var valid bool
+ valid, metadata, err = b.validateBindSecretID(req, roleName, secretID, role.HMACKey, role.BoundCIDRList)
+ if err != nil {
+ return nil, "", metadata, err
+ }
+ if !valid {
+ return nil, "", metadata, fmt.Errorf("invalid secret_id %q", secretID)
+ }
+ }
+
+ if role.BoundCIDRList != "" {
+ // If 'bound_cidr_list' was set, verify the CIDR restrictions
+ if req.Connection == nil || req.Connection.RemoteAddr == "" {
+ return nil, "", metadata, fmt.Errorf("failed to get connection information")
+ }
+
+ belongs, err := cidrutil.IPBelongsToCIDRBlocksString(req.Connection.RemoteAddr, role.BoundCIDRList, ",")
+ if err != nil {
+ return nil, "", metadata, fmt.Errorf("failed to verify the CIDR restrictions set on the role: %v", err)
+ }
+ if !belongs {
+ return nil, "", metadata, fmt.Errorf("source address %q unauthorized through CIDR restrictions on the role", req.Connection.RemoteAddr)
+ }
+ }
+
+ return role, roleName, metadata, nil
+}
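+
+// Editor's sketch (not part of the upstream change): an illustrative login
+// payload that ultimately flows through validateCredentials. The identifiers
+// and the address are hypothetical; in practice the fields arrive via the
+// login path's framework.FieldData rather than being built by hand.
+//
+//   loginData := map[string]interface{}{
+//       "role_id":   "59d6d1cb-...", // hypothetical RoleID
+//       "secret_id": "84896a0c-...", // hypothetical SecretID
+//   }
+//   req := &logical.Request{
+//       Operation:  logical.UpdateOperation,
+//       Path:       "login",
+//       Connection: &logical.Connection{RemoteAddr: "10.0.1.5"},
+//       Data:       loginData,
+//   }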
+
+// validateBindSecretID is used to determine if the given SecretID is a valid one.
+func (b *backend) validateBindSecretID(req *logical.Request, roleName, secretID,
+ hmacKey, roleBoundCIDRList string) (bool, map[string]string, error) {
+ secretIDHMAC, err := createHMAC(hmacKey, secretID)
+ if err != nil {
+ return false, nil, fmt.Errorf("failed to create HMAC of secret_id: %v", err)
+ }
+
+ roleNameHMAC, err := createHMAC(hmacKey, roleName)
+ if err != nil {
+ return false, nil, fmt.Errorf("failed to create HMAC of role_name: %v", err)
+ }
+
+ entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
+
+ // SecretID locks are always indexed based on secretIDHMACs. This helps in
+ // acquiring the locks when the SecretIDs are listed, and allows grabbing
+ // the correct locks even when the SecretIDs are not known in plaintext.
+ lock := b.secretIDLock(secretIDHMAC)
+ lock.RLock()
+
+ result, err := b.nonLockedSecretIDStorageEntry(req.Storage, roleNameHMAC, secretIDHMAC)
+ if err != nil {
+ lock.RUnlock()
+ return false, nil, err
+ } else if result == nil {
+ lock.RUnlock()
+ return false, nil, nil
+ }
+
+ // SecretIDNumUses will be zero only if no usage limit was set at all, in
+ // which case the SecretID remains valid as long as it is not expired.
+ if result.SecretIDNumUses == 0 {
+ // Ensure that the CIDRs on the secret ID are still a subset of the
+ // role's CIDRs
+ if err := verifyCIDRRoleSecretIDSubset(result.CIDRList,
+ roleBoundCIDRList); err != nil {
+ return false, nil, err
+ }
+
+ // If CIDR restrictions are present on the secret ID, check that the
+ // source IP complies with them
+ if len(result.CIDRList) != 0 {
+ if req.Connection == nil || req.Connection.RemoteAddr == "" {
+ return false, nil, fmt.Errorf("failed to get connection information")
+ }
+
+ if belongs, err := cidrutil.IPBelongsToCIDRBlocksSlice(req.Connection.RemoteAddr, result.CIDRList); !belongs || err != nil {
+ return false, nil, fmt.Errorf("source address %q unauthorized through CIDR restrictions on the secret ID: %v", req.Connection.RemoteAddr, err)
+ }
+ }
+
+ lock.RUnlock()
+ return true, result.Metadata, nil
+ }
+
+ // A non-zero SecretIDNumUses means that its use count should be updated in
+ // the storage. Switch the lock from a `read` to a `write` and update the
+ // storage entry.
+ lock.RUnlock()
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Lock switching may change the data. Refresh the contents.
+ result, err = b.nonLockedSecretIDStorageEntry(req.Storage, roleNameHMAC, secretIDHMAC)
+ if err != nil {
+ return false, nil, err
+ }
+ if result == nil {
+ return false, nil, nil
+ }
+
+ // If only a single use is left, delete the SecretID entry from the
+ // storage but do not fail the validation request. Subsequent requests
+ // to use the same SecretID will fail.
+ if result.SecretIDNumUses == 1 {
+ // Delete the secret IDs accessor first
+ if err := b.deleteSecretIDAccessorEntry(req.Storage, result.SecretIDAccessor); err != nil {
+ return false, nil, err
+ }
+ if err := req.Storage.Delete(entryIndex); err != nil {
+ return false, nil, fmt.Errorf("failed to delete secret ID: %v", err)
+ }
+ } else {
+ // If the use count is greater than one, decrement it and update the last updated time.
+ result.SecretIDNumUses--
+ result.LastUpdatedTime = time.Now()
+ if entry, err := logical.StorageEntryJSON(entryIndex, &result); err != nil {
+ return false, nil, fmt.Errorf("failed to decrement the use count for secret ID %q", secretID)
+ } else if err = req.Storage.Put(entry); err != nil {
+ return false, nil, fmt.Errorf("failed to decrement the use count for secret ID %q", secretID)
+ }
+ }
+
+ // Ensure that the CIDRs on the secret ID are still a subset of the
+ // role's CIDRs
+ if err := verifyCIDRRoleSecretIDSubset(result.CIDRList,
+ roleBoundCIDRList); err != nil {
+ return false, nil, err
+ }
+
+ // If CIDR restrictions are present on the secret ID, check that the
+ // source IP complies with them
+ if len(result.CIDRList) != 0 {
+ if req.Connection == nil || req.Connection.RemoteAddr == "" {
+ return false, nil, fmt.Errorf("failed to get connection information")
+ }
+
+ if belongs, err := cidrutil.IPBelongsToCIDRBlocksSlice(req.Connection.RemoteAddr, result.CIDRList); !belongs || err != nil {
+ return false, nil, fmt.Errorf("source address %q unauthorized through CIDR restrictions on the secret ID: %v", req.Connection.RemoteAddr, err)
+ }
+ }
+
+ return true, result.Metadata, nil
+}
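+
+// Editor's note (worked example, hypothetical numbers): a SecretID created
+// with secret_id_num_uses=3 can be validated three times; the third call
+// deletes both the entry and its accessor, so a fourth attempt finds no
+// entry and returns (false, nil, nil). With secret_id_num_uses=0 the use
+// count is never decremented and the SecretID stays valid until its
+// expiration time, if any.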
+
+// verifyCIDRRoleSecretIDSubset checks if the CIDR blocks set on the secret ID
+// are a subset of CIDR blocks set on the role
+func verifyCIDRRoleSecretIDSubset(secretIDCIDRs []string, roleBoundCIDRList string) error {
+ if len(secretIDCIDRs) != 0 {
+ // Parse the CIDRs on role as a slice
+ roleCIDRs := strutil.ParseDedupLowercaseAndSortStrings(roleBoundCIDRList, ",")
+
+ // If there are no CIDR blocks on the role, the subset requirement is
+ // trivially satisfied
+ if len(roleCIDRs) != 0 {
+ subset, err := cidrutil.SubsetBlocks(roleCIDRs, secretIDCIDRs)
+ if !subset || err != nil {
+ return fmt.Errorf("failed to verify subset relationship between CIDR blocks on the role %q and CIDR blocks on the secret ID %q: %v", roleCIDRs, secretIDCIDRs, err)
+ }
+ }
+ }
+
+ return nil
+}
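+
+// Editor's sketch of the subset check above (CIDR values are hypothetical):
+//
+//   roleCIDRs := "10.0.0.0/16,192.168.1.0/24"
+//   _ = verifyCIDRRoleSecretIDSubset([]string{"10.0.1.0/24"}, roleCIDRs)   // nil: subset
+//   _ = verifyCIDRRoleSecretIDSubset([]string{"172.16.0.0/12"}, roleCIDRs) // error: not a subset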
+
+// createHMAC creates a SHA256 HMAC of the given 'value' using the given 'key'
+// and returns a hex-encoded string.
+func createHMAC(key, value string) (string, error) {
+ if key == "" {
+ return "", fmt.Errorf("invalid HMAC key")
+ }
+ hm := hmac.New(sha256.New, []byte(key))
+ hm.Write([]byte(value))
+ return hex.EncodeToString(hm.Sum(nil)), nil
+}
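+
+// Editor's sketch: how createHMAC combines with the role's HMAC key to form
+// the storage index used throughout this file (role, roleName and secretID
+// are assumed to be in scope):
+//
+//   secretIDHMAC, _ := createHMAC(role.HMACKey, secretID)
+//   roleNameHMAC, _ := createHMAC(role.HMACKey, roleName)
+//   entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)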
+
+func (b *backend) secretIDLock(secretIDHMAC string) *locksutil.LockEntry {
+ return locksutil.LockForKey(b.secretIDLocks, secretIDHMAC)
+}
+
+func (b *backend) secretIDAccessorLock(secretIDAccessor string) *locksutil.LockEntry {
+ return locksutil.LockForKey(b.secretIDAccessorLocks, secretIDAccessor)
+}
+
+// nonLockedSecretIDStorageEntry fetches the secret ID properties from physical
+// storage. The entry will be indexed based on the given HMACs of both role
+// name and the secret ID. This method does not acquire the secret ID lock to
+// fetch the storage entry; locks must be acquired before calling this method.
+func (b *backend) nonLockedSecretIDStorageEntry(s logical.Storage, roleNameHMAC, secretIDHMAC string) (*secretIDStorageEntry, error) {
+ if secretIDHMAC == "" {
+ return nil, fmt.Errorf("missing secret ID HMAC")
+ }
+
+ if roleNameHMAC == "" {
+ return nil, fmt.Errorf("missing role name HMAC")
+ }
+
+ // Prepare the storage index at which the secret ID will be stored
+ entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
+
+ entry, err := s.Get(entryIndex)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ result := secretIDStorageEntry{}
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ // TODO: Remove this upgrade bit in future releases
+ persistNeeded := false
+ if result.SecretIDNumUsesDeprecated != 0 {
+ if result.SecretIDNumUses == 0 ||
+ result.SecretIDNumUsesDeprecated < result.SecretIDNumUses {
+ result.SecretIDNumUses = result.SecretIDNumUsesDeprecated
+ persistNeeded = true
+ }
+ if result.SecretIDNumUses < result.SecretIDNumUsesDeprecated {
+ result.SecretIDNumUsesDeprecated = result.SecretIDNumUses
+ persistNeeded = true
+ }
+ }
+
+ if persistNeeded {
+ if err := b.nonLockedSetSecretIDStorageEntry(s, roleNameHMAC, secretIDHMAC, &result); err != nil {
+ return nil, fmt.Errorf("failed to upgrade role storage entry %s", err)
+ }
+ }
+
+ return &result, nil
+}
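+
+// Editor's note (worked example): an entry persisted by an older version with
+// the deprecated JSON key SecretIDNumUses=10 and secret_id_num_uses=0 is
+// normalized on first read so that both fields hold 10, and the entry is
+// written back; TestAppRole_SecretIDNumUsesUpgrade in validation_test.go
+// exercises this path.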
+
+// nonLockedSetSecretIDStorageEntry creates or updates a secret ID entry at the
+// physical storage. The entry will be indexed based on the given HMACs of both
+// role name and the secret ID. This method does not acquire the secret ID
+// lock to create/update the storage entry; locks must be acquired before
+// calling this method.
+func (b *backend) nonLockedSetSecretIDStorageEntry(s logical.Storage, roleNameHMAC, secretIDHMAC string, secretEntry *secretIDStorageEntry) error {
+ if secretIDHMAC == "" {
+ return fmt.Errorf("missing secret ID HMAC")
+ }
+
+ if roleNameHMAC == "" {
+ return fmt.Errorf("missing role name HMAC")
+ }
+
+ if secretEntry == nil {
+ return fmt.Errorf("nil secret entry")
+ }
+
+ entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
+
+ if entry, err := logical.StorageEntryJSON(entryIndex, secretEntry); err != nil {
+ return err
+ } else if err = s.Put(entry); err != nil {
+ return err
+ }
+
+ return nil
+}
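+
+// Editor's sketch of the intended call pattern for the nonLocked* pair above
+// (locking is the caller's responsibility):
+//
+//   lock := b.secretIDLock(secretIDHMAC)
+//   lock.Lock()
+//   defer lock.Unlock()
+//   entry, err := b.nonLockedSecretIDStorageEntry(s, roleNameHMAC, secretIDHMAC)
+//   // ... mutate entry ...
+//   err = b.nonLockedSetSecretIDStorageEntry(s, roleNameHMAC, secretIDHMAC, entry)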
+
+// registerSecretIDEntry creates a new storage entry for the given SecretID.
+func (b *backend) registerSecretIDEntry(s logical.Storage, roleName, secretID, hmacKey string, secretEntry *secretIDStorageEntry) (*secretIDStorageEntry, error) {
+ secretIDHMAC, err := createHMAC(hmacKey, secretID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create HMAC of secret ID: %v", err)
+ }
+ roleNameHMAC, err := createHMAC(hmacKey, roleName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create HMAC of role_name: %v", err)
+ }
+
+ lock := b.secretIDLock(secretIDHMAC)
+ lock.RLock()
+
+ entry, err := b.nonLockedSecretIDStorageEntry(s, roleNameHMAC, secretIDHMAC)
+ if err != nil {
+ lock.RUnlock()
+ return nil, err
+ }
+ if entry != nil {
+ lock.RUnlock()
+ return nil, fmt.Errorf("SecretID is already registered")
+ }
+
+ // If there isn't an entry for the secretID already, swap the read lock
+ // for a write lock and create an entry.
+ lock.RUnlock()
+ lock.Lock()
+ defer lock.Unlock()
+
+ // But before saving a new entry, check if the secretID entry was created during the lock switch.
+ entry, err = b.nonLockedSecretIDStorageEntry(s, roleNameHMAC, secretIDHMAC)
+ if err != nil {
+ return nil, err
+ }
+ if entry != nil {
+ return nil, fmt.Errorf("SecretID is already registered")
+ }
+
+ //
+ // Create a new entry for the SecretID
+ //
+
+ // Set the creation time for the SecretID
+ currentTime := time.Now()
+ secretEntry.CreationTime = currentTime
+ secretEntry.LastUpdatedTime = currentTime
+
+ // If the SecretIDTTL is negative or exceeds the backend mount's maximum,
+ // cap the expiration to the backend's max TTL. Otherwise, if a TTL was
+ // set, use it to determine the expiration time.
+ if secretEntry.SecretIDTTL < time.Duration(0) || secretEntry.SecretIDTTL > b.System().MaxLeaseTTL() {
+ secretEntry.ExpirationTime = currentTime.Add(b.System().MaxLeaseTTL())
+ } else if secretEntry.SecretIDTTL != time.Duration(0) {
+ // Set the ExpirationTime only if SecretIDTTL was set. SecretIDs should not
+ // expire by default.
+ secretEntry.ExpirationTime = currentTime.Add(secretEntry.SecretIDTTL)
+ }
+
+ // Before storing the SecretID, store its accessor.
+ if err := b.createSecretIDAccessorEntry(s, secretEntry, secretIDHMAC); err != nil {
+ return nil, err
+ }
+
+ if err := b.nonLockedSetSecretIDStorageEntry(s, roleNameHMAC, secretIDHMAC, secretEntry); err != nil {
+ return nil, err
+ }
+
+ return secretEntry, nil
+}
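+
+// Editor's note (hypothetical values): with a backend max lease TTL of 768h,
+// a SecretIDTTL of 0 leaves ExpirationTime unset (non-expiring), 24h yields
+// an expiration of now+24h, and 1000h (or any negative value) is capped to
+// now+768h.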
+
+// secretIDAccessorEntry is used to read the storage entry that maps an
+// accessor to a secret_id.
+func (b *backend) secretIDAccessorEntry(s logical.Storage, secretIDAccessor string) (*secretIDAccessorStorageEntry, error) {
+ if secretIDAccessor == "" {
+ return nil, fmt.Errorf("missing secretIDAccessor")
+ }
+
+ var result secretIDAccessorStorageEntry
+
+ // Compute the index entry that maps the accessor to the SecretID
+ entryIndex := "accessor/" + b.salt.SaltID(secretIDAccessor)
+
+ accessorLock := b.secretIDAccessorLock(secretIDAccessor)
+ accessorLock.RLock()
+ defer accessorLock.RUnlock()
+
+ if entry, err := s.Get(entryIndex); err != nil {
+ return nil, err
+ } else if entry == nil {
+ return nil, nil
+ } else if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+// createSecretIDAccessorEntry creates an identifier for the SecretID. A storage index,
+// mapping the accessor to the SecretID is also created. This method should
+// be called when the lock for the corresponding SecretID is held.
+func (b *backend) createSecretIDAccessorEntry(s logical.Storage, entry *secretIDStorageEntry, secretIDHMAC string) error {
+ // Create a random accessor
+ accessorUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ return err
+ }
+ entry.SecretIDAccessor = accessorUUID
+
+ // Create the index entry mapping the accessor to the SecretID
+ entryIndex := "accessor/" + b.salt.SaltID(entry.SecretIDAccessor)
+
+ accessorLock := b.secretIDAccessorLock(accessorUUID)
+ accessorLock.Lock()
+ defer accessorLock.Unlock()
+
+ if entry, err := logical.StorageEntryJSON(entryIndex, &secretIDAccessorStorageEntry{
+ SecretIDHMAC: secretIDHMAC,
+ }); err != nil {
+ return err
+ } else if err = s.Put(entry); err != nil {
+ return fmt.Errorf("failed to persist accessor index entry: %v", err)
+ }
+
+ return nil
+}
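+
+// Editor's note: the resulting lookup flow is accessor ->
+// "accessor/<salted accessor>" -> secretIDAccessorStorageEntry.SecretIDHMAC ->
+// "secret_id/<roleNameHMAC>/<secretIDHMAC>", which lets callers resolve or
+// delete a SecretID without ever handling it in plaintext.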
+
+// deleteSecretIDAccessorEntry deletes the storage index mapping the accessor to a SecretID.
+func (b *backend) deleteSecretIDAccessorEntry(s logical.Storage, secretIDAccessor string) error {
+ accessorEntryIndex := "accessor/" + b.salt.SaltID(secretIDAccessor)
+
+ accessorLock := b.secretIDAccessorLock(secretIDAccessor)
+ accessorLock.Lock()
+ defer accessorLock.Unlock()
+
+ // Delete the accessor of the SecretID first
+ if err := s.Delete(accessorEntryIndex); err != nil {
+ return fmt.Errorf("failed to delete accessor storage entry: %v", err)
+ }
+
+ return nil
+}
+
+// flushRoleSecrets deletes all the SecretIDs that belong to the given
+// role.
+func (b *backend) flushRoleSecrets(s logical.Storage, roleName, hmacKey string) error {
+ roleNameHMAC, err := createHMAC(hmacKey, roleName)
+ if err != nil {
+ return fmt.Errorf("failed to create HMAC of role_name: %v", err)
+ }
+
+ // Acquire the custom lock to perform listing of SecretIDs
+ b.secretIDListingLock.RLock()
+ defer b.secretIDListingLock.RUnlock()
+
+ secretIDHMACs, err := s.List(fmt.Sprintf("secret_id/%s/", roleNameHMAC))
+ if err != nil {
+ return err
+ }
+ for _, secretIDHMAC := range secretIDHMACs {
+ // Acquire the lock belonging to the SecretID
+ lock := b.secretIDLock(secretIDHMAC)
+ lock.Lock()
+ entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
+ if err := s.Delete(entryIndex); err != nil {
+ lock.Unlock()
+ return fmt.Errorf("error deleting SecretID %q from storage: %v", secretIDHMAC, err)
+ }
+ lock.Unlock()
+ }
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation_test.go
new file mode 100644
index 0000000..aa66644
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation_test.go
@@ -0,0 +1,58 @@
+package approle
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestAppRole_SecretIDNumUsesUpgrade(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, storage := createBackendWithStorage(t)
+
+ roleData := map[string]interface{}{
+ "secret_id_num_uses": 10,
+ }
+
+ roleReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/role1",
+ Storage: storage,
+ Data: roleData,
+ }
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ secretIDReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "role/role1/secret-id",
+ Storage: storage,
+ }
+
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ secretIDReq.Operation = logical.UpdateOperation
+ secretIDReq.Path = "role/role1/secret-id/lookup"
+ secretIDReq.Data = map[string]interface{}{
+ "secret_id": resp.Data["secret_id"].(string),
+ }
+ resp, err = b.HandleRequest(secretIDReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Check if the response contains the value set for secret_id_num_uses
+ // and not SecretIDNumUses
+ if resp.Data["secret_id_num_uses"] != 10 ||
+ resp.Data["SecretIDNumUses"] != 0 {
+ t.Fatal("invalid secret_id_num_uses")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go
new file mode 100644
index 0000000..fbd62c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go
@@ -0,0 +1,210 @@
+package awsauth
+
+import (
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/aws/aws-sdk-go/service/iam"
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ b, err := Backend(conf)
+ if err != nil {
+ return nil, err
+ }
+ return b.Setup(conf)
+}
+
+type backend struct {
+ *framework.Backend
+ Salt *salt.Salt
+
+ // Used during initialization to set the salt
+ view logical.Storage
+
+ // Lock to make changes to any of the backend's configuration endpoints.
+ configMutex sync.RWMutex
+
+ // Lock to make changes to role entries
+ roleMutex sync.RWMutex
+
+ // Lock to make changes to the blacklist entries
+ blacklistMutex sync.RWMutex
+
+ // Guards the blacklist/whitelist tidy functions
+ tidyBlacklistCASGuard uint32
+ tidyWhitelistCASGuard uint32
+
+ // Duration after which the periodic function of the backend needs to
+ // tidy the blacklist and whitelist entries.
+ tidyCooldownPeriod time.Duration
+
+ // nextTidyTime holds the time at which the periodic func should initiate
+ // the tidy operations. This is set by periodicFunc based on the value of
+ // tidyCooldownPeriod.
+ nextTidyTime time.Time
+
+ // Map to hold the EC2 client objects indexed by region and STS role.
+ // This avoids the overhead of creating a client object for every login
+ // request. When the credentials are modified or deleted, all the cached
+ // client objects are flushed. The empty STS role signifies the master account.
+ EC2ClientsMap map[string]map[string]*ec2.EC2
+
+ // Map to hold the IAM client objects indexed by region and STS role.
+ // This avoids the overhead of creating a client object for every login
+ // request. When the credentials are modified or deleted, all the cached
+ // client objects are flushed. The empty STS role signifies the master account.
+ IAMClientsMap map[string]map[string]*iam.IAM
+}
+
+func Backend(conf *logical.BackendConfig) (*backend, error) {
+ b := &backend{
+ // Setting the periodic func to be run once an hour.
+ // If there is a real need, this can be made configurable.
+ tidyCooldownPeriod: time.Hour,
+ view: conf.StorageView,
+ EC2ClientsMap: make(map[string]map[string]*ec2.EC2),
+ IAMClientsMap: make(map[string]map[string]*iam.IAM),
+ }
+
+ b.Backend = &framework.Backend{
+ PeriodicFunc: b.periodicFunc,
+ AuthRenew: b.pathLoginRenew,
+ Help: backendHelp,
+ PathsSpecial: &logical.Paths{
+ Unauthenticated: []string{
+ "login",
+ },
+ LocalStorage: []string{
+ "whitelist/identity/",
+ },
+ },
+ Paths: []*framework.Path{
+ pathLogin(b),
+ pathListRole(b),
+ pathListRoles(b),
+ pathRole(b),
+ pathRoleTag(b),
+ pathConfigClient(b),
+ pathConfigCertificate(b),
+ pathConfigSts(b),
+ pathListSts(b),
+ pathConfigTidyRoletagBlacklist(b),
+ pathConfigTidyIdentityWhitelist(b),
+ pathListCertificates(b),
+ pathListRoletagBlacklist(b),
+ pathRoletagBlacklist(b),
+ pathTidyRoletagBlacklist(b),
+ pathListIdentityWhitelist(b),
+ pathIdentityWhitelist(b),
+ pathTidyIdentityWhitelist(b),
+ },
+
+ Invalidate: b.invalidate,
+
+ Init: b.initialize,
+ }
+
+ return b, nil
+}
+
+func (b *backend) initialize() error {
+ salt, err := salt.NewSalt(b.view, &salt.Config{
+ HashFunc: salt.SHA256Hash,
+ })
+ if err != nil {
+ return err
+ }
+ b.Salt = salt
+ return nil
+}
+
+// periodicFunc performs the tasks that the backend wishes to do periodically.
+// Currently this is triggered once a minute by the RollbackManager.
+//
+// The tasks currently done by this function are to clean up the expired
+// entries of both the blacklisted role tags and the whitelisted identities.
+// Tidying is done not once a minute, but once an hour, controlled by
+// 'tidyCooldownPeriod'. Tidying of the blacklist and whitelist is enabled by
+// default; this can be changed using the 'config/tidy/roletag-blacklist' and
+// 'config/tidy/identity-whitelist' endpoints.
+func (b *backend) periodicFunc(req *logical.Request) error {
+ // Run the tidy operations for the first time. Then run it when current
+ // time matches the nextTidyTime.
+ if b.nextTidyTime.IsZero() || !time.Now().Before(b.nextTidyTime) {
+ // safety_buffer defaults to 180 days for roletag blacklist
+ safety_buffer := 15552000
+ tidyBlacklistConfigEntry, err := b.lockedConfigTidyRoleTags(req.Storage)
+ if err != nil {
+ return err
+ }
+ skipBlacklistTidy := false
+ // check if tidying of role tags was configured
+ if tidyBlacklistConfigEntry != nil {
+ // check if periodic tidying of role tags was disabled
+ if tidyBlacklistConfigEntry.DisablePeriodicTidy {
+ skipBlacklistTidy = true
+ }
+ // overwrite the default safety_buffer with the configured value
+ safety_buffer = tidyBlacklistConfigEntry.SafetyBuffer
+ }
+ // tidy role tags if explicitly not disabled
+ if !skipBlacklistTidy {
+ b.tidyBlacklistRoleTag(req.Storage, safety_buffer)
+ }
+
+ // reset the safety_buffer to 72h
+ safety_buffer = 259200
+ tidyWhitelistConfigEntry, err := b.lockedConfigTidyIdentities(req.Storage)
+ if err != nil {
+ return err
+ }
+ skipWhitelistTidy := false
+ // check if tidying of identities was configured
+ if tidyWhitelistConfigEntry != nil {
+ // check if periodic tidying of identities was disabled
+ if tidyWhitelistConfigEntry.DisablePeriodicTidy {
+ skipWhitelistTidy = true
+ }
+ // overwrite the default safety_buffer with the configured value
+ safety_buffer = tidyWhitelistConfigEntry.SafetyBuffer
+ }
+ // tidy identities if explicitly not disabled
+ if !skipWhitelistTidy {
+ b.tidyWhitelistIdentity(req.Storage, safety_buffer)
+ }
+
+ // Update the time at which to run the tidy functions again.
+ b.nextTidyTime = time.Now().Add(b.tidyCooldownPeriod)
+ }
+ return nil
+}
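+
+// Editor's note: both tidy operations can be tuned through the endpoints
+// named above, e.g. an update to "config/tidy/roletag-blacklist" or
+// "config/tidy/identity-whitelist" with a payload such as
+// {"safety_buffer": "60", "disable_periodic_tidy": true}; these values are
+// taken from the tests in backend_test.go.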
+
+func (b *backend) invalidate(key string) {
+ switch key {
+ case "config/client":
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+ b.flushCachedEC2Clients()
+ b.flushCachedIAMClients()
+ }
+}
+
+const backendHelp = `
+The aws-ec2 auth backend takes in the PKCS#7 signature of an AWS EC2 instance
+and a client-created nonce to authenticate the EC2 instance with Vault.
+
+Authentication is backed by a preconfigured role in the backend. The role
+represents the authorization of resources through the Vault policies it
+contains. A role can be created using the 'role/' endpoint.
+
+If the capabilities of the role need to be further restricted on the instance
+that is using the role, the 'role_tag' option can be enabled on the role, and
+a tag can be generated using the 'role/<role>/tag' endpoint. This tag
+represents the subset of capabilities set on the role. When the 'role_tag'
+option is enabled on the role, the login operation requires that a
+corresponding role tag is attached to the EC2 instance performing the login.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go
new file mode 100644
index 0000000..a539fba
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go
@@ -0,0 +1,1504 @@
+package awsauth
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/sts"
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+)
+
+func TestBackend_CreateParseVerifyRoleTag(t *testing.T) {
+ // create a backend
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // create a role entry
+ data := map[string]interface{}{
+ "auth_type": "ec2",
+ "policies": "p,q,r,s",
+ "bound_ami_id": "abcd-123",
+ }
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/abcd-123",
+ Storage: storage,
+ Data: data,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create role")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // read the created role entry
+ roleEntry, err := b.lockedAWSRole(storage, "abcd-123")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // create a nonce for the role tag
+ nonce, err := createRoleTagNonce()
+ if err != nil {
+ t.Fatal(err)
+ }
+ rTag1 := &roleTag{
+ Version: "v1",
+ Role: "abcd-123",
+ Nonce: nonce,
+ Policies: []string{"p", "q", "r"},
+ MaxTTL: 200000000000, // 200s
+ }
+
+ // create a role tag against the role entry
+ val, err := createRoleTagValue(rTag1, roleEntry)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if val == "" {
+ t.Fatalf("failed to create role tag")
+ }
+
+ // parse the created role tag
+ rTag2, err := b.parseAndVerifyRoleTagValue(storage, val)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // check the values in parsed role tag
+ if rTag2.Version != "v1" ||
+ rTag2.Nonce != nonce ||
+ rTag2.Role != "abcd-123" ||
+ rTag2.MaxTTL != 200000000000 || // 200s
+ !policyutil.EquivalentPolicies(rTag2.Policies, []string{"p", "q", "r"}) ||
+ len(rTag2.HMAC) == 0 {
+ t.Fatalf("parsed role tag is invalid")
+ }
+
+ // verify the tag contents using role specific HMAC key
+ verified, err := verifyRoleTagValue(rTag2, roleEntry)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !verified {
+ t.Fatalf("failed to verify the role tag")
+ }
+
+ // register a different role
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/ami-6789",
+ Storage: storage,
+ Data: data,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create role")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // get the entry of the newly created role entry
+ roleEntry2, err := b.lockedAWSRole(storage, "ami-6789")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // try to verify the tag created with previous role's HMAC key
+ // with the newly registered entry's HMAC key
+ verified, err = verifyRoleTagValue(rTag2, roleEntry2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if verified {
+ t.Fatalf("verification of role tag should have failed")
+ }
+
+ // modify any value in role tag and try to verify it
+ rTag2.Version = "v2"
+ verified, err = verifyRoleTagValue(rTag2, roleEntry)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if verified {
+ t.Fatalf("verification of role tag should have failed: invalid Version")
+ }
+}
+
+func TestBackend_prepareRoleTagPlaintextValue(t *testing.T) {
+ // create a nonce for the role tag
+ nonce, err := createRoleTagNonce()
+ if err != nil {
+ t.Fatal(err)
+ }
+ rTag := &roleTag{
+ Version: "v1",
+ Nonce: nonce,
+ Role: "abcd-123",
+ }
+
+ rTag.Version = ""
+ // try to create plaintext part of role tag
+ // without specifying version
+ val, err := prepareRoleTagPlaintextValue(rTag)
+ if err == nil {
+ t.Fatalf("expected error for missing version")
+ }
+ rTag.Version = "v1"
+
+ rTag.Nonce = ""
+ // try to create plaintext part of role tag
+ // without specifying nonce
+ val, err = prepareRoleTagPlaintextValue(rTag)
+ if err == nil {
+ t.Fatalf("expected error for missing nonce")
+ }
+ rTag.Nonce = nonce
+
+ rTag.Role = ""
+ // try to create plaintext part of role tag
+ // without specifying role
+ val, err = prepareRoleTagPlaintextValue(rTag)
+ if err == nil {
+ t.Fatalf("expected error for missing role")
+ }
+ rTag.Role = "abcd-123"
+
+ // create the plaintext part of the tag
+ val, err = prepareRoleTagPlaintextValue(rTag)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // verify if it contains known fields
+ if !strings.Contains(val, "r=") ||
+ !strings.Contains(val, "d=") ||
+ !strings.Contains(val, "m=") ||
+ !strings.HasPrefix(val, "v1") {
+ t.Fatalf("incorrect information in role tag plaintext value")
+ }
+
+ rTag.InstanceID = "instance-123"
+ // create the role tag with instance_id specified
+ val, err = prepareRoleTagPlaintextValue(rTag)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // verify it
+ if !strings.Contains(val, "i=") {
+ t.Fatalf("missing instance ID in role tag plaintext value")
+ }
+
+ rTag.MaxTTL = 200000000000
+ // create the role tag with max_ttl specified
+ val, err = prepareRoleTagPlaintextValue(rTag)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // verify it
+ if !strings.Contains(val, "t=") {
+ t.Fatalf("missing max_ttl field in role tag plaintext value")
+ }
+}
+
+func TestBackend_CreateRoleTagNonce(t *testing.T) {
+ // create a nonce for the role tag
+ nonce, err := createRoleTagNonce()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if nonce == "" {
+ t.Fatalf("failed to create role tag nonce")
+ }
+
+ // verify that the value returned is base64 encoded
+ nonceBytes, err := base64.StdEncoding.DecodeString(nonce)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(nonceBytes) == 0 {
+ t.Fatalf("length of role tag nonce is zero")
+ }
+}
+
+func TestBackend_ConfigTidyIdentities(t *testing.T) {
+ // create a backend
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test update operation
+ tidyRequest := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/tidy/identity-whitelist",
+ Storage: storage,
+ }
+ data := map[string]interface{}{
+ "safety_buffer": "60",
+ "disable_periodic_tidy": true,
+ }
+ tidyRequest.Data = data
+ _, err = b.HandleRequest(tidyRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test read operation
+ tidyRequest.Operation = logical.ReadOperation
+ resp, err := b.HandleRequest(tidyRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatalf("failed to read config/tidy/identity-whitelist endpoint")
+ }
+ if resp.Data["safety_buffer"].(int) != 60 || !resp.Data["disable_periodic_tidy"].(bool) {
+ t.Fatalf("bad: expected: safety_buffer:60 disable_periodic_tidy:true actual: safety_buffer:%s disable_periodic_tidy:%t\n", resp.Data["safety_buffer"].(int), resp.Data["disable_periodic_tidy"].(bool))
+ }
+
+ // test delete operation
+ tidyRequest.Operation = logical.DeleteOperation
+ resp, err = b.HandleRequest(tidyRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil {
+ t.Fatalf("failed to delete config/tidy/identity-whitelist")
+ }
+}
+
+func TestBackend_ConfigTidyRoleTags(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test update operation
+ tidyRequest := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/tidy/roletag-blacklist",
+ Storage: storage,
+ }
+ data := map[string]interface{}{
+ "safety_buffer": "60",
+ "disable_periodic_tidy": true,
+ }
+ tidyRequest.Data = data
+ _, err = b.HandleRequest(tidyRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test read operation
+ tidyRequest.Operation = logical.ReadOperation
+ resp, err := b.HandleRequest(tidyRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatalf("failed to read config/tidy/roletag-blacklist endpoint")
+ }
+ if resp.Data["safety_buffer"].(int) != 60 || !resp.Data["disable_periodic_tidy"].(bool) {
+ t.Fatalf("bad: expected: safety_buffer:60 disable_periodic_tidy:true actual: safety_buffer:%s disable_periodic_tidy:%t\n", resp.Data["safety_buffer"].(int), resp.Data["disable_periodic_tidy"].(bool))
+ }
+
+ // test delete operation
+ tidyRequest.Operation = logical.DeleteOperation
+ resp, err = b.HandleRequest(tidyRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil {
+ t.Fatalf("failed to delete config/tidy/roletag-blacklist")
+ }
+}
+
+func TestBackend_TidyIdentities(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test update operation
+ _, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "tidy/identity-whitelist",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackend_TidyRoleTags(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test update operation
+ _, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "tidy/roletag-blacklist",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackend_ConfigClient(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := map[string]interface{}{"access_key": "AKIAJBRHKV6EVTTNXDHA",
+ "secret_key": "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",
+ }
+
+ stepCreate := logicaltest.TestStep{
+ Operation: logical.CreateOperation,
+ Path: "config/client",
+ Data: data,
+ }
+
+ stepUpdate := logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/client",
+ Data: data,
+ }
+
+ data3 := map[string]interface{}{"access_key": "",
+ "secret_key": "mCtSM8ZUEQ3mOFVZYPBQkf2sO6F/W7a5TVzrl3Oj",
+ }
+ stepInvalidAccessKey := logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/client",
+ Data: data3,
+ ErrorOk: true,
+ }
+
+ data4 := map[string]interface{}{"access_key": "accesskey",
+ "secret_key": "",
+ }
+ stepInvalidSecretKey := logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/client",
+ Data: data4,
+ ErrorOk: true,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: false,
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ stepCreate,
+ stepInvalidAccessKey,
+ stepInvalidSecretKey,
+ stepUpdate,
+ },
+ })
+
+ // test existence check returning false
+ checkFound, exists, err := b.HandleExistenceCheck(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "config/client",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !checkFound {
+ t.Fatal("existence check not found for path 'config/client'")
+ }
+ if exists {
+ t.Fatal("existence check should have returned 'false' for 'config/client'")
+ }
+
+ // create an entry
+ configClientCreateRequest := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/client",
+ Data: data,
+ Storage: storage,
+ }
+ _, err = b.HandleRequest(configClientCreateRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test existence check returning true
+ checkFound, exists, err = b.HandleExistenceCheck(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "config/client",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !checkFound {
+ t.Fatal("existence check not found for path 'config/client'")
+ }
+ if !exists {
+ t.Fatal("existence check should have returned 'true' for 'config/client'")
+ }
+
+ endpointData := map[string]interface{}{
+ "secret_key": "secretkey",
+ "access_key": "accesskey",
+ "endpoint": "endpointvalue",
+ }
+
+ endpointReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/client",
+ Storage: storage,
+ Data: endpointData,
+ }
+ _, err = b.HandleRequest(endpointReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ endpointReq.Operation = logical.ReadOperation
+ resp, err := b.HandleRequest(endpointReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatalf("failed to read config/client")
+ }
+ actual := resp.Data["endpoint"].(string)
+ if actual != "endpointvalue" {
+ t.Fatalf("bad: endpoint: expected:endpointvalue actual:%s\n", actual)
+ }
+}
+
+func TestBackend_pathConfigCertificate(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ certReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Storage: storage,
+ Path: "config/certificate/cert1",
+ }
+ checkFound, exists, err := b.HandleExistenceCheck(certReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !checkFound {
+ t.Fatal("existence check not found for path 'config/certificate/cert1'")
+ }
+ if exists {
+ t.Fatal("existence check should have returned 'false' for 'config/certificate/cert1'")
+ }
+
+ data := map[string]interface{}{
+ "type": "pkcs7",
+ "aws_public_cert": `LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM3VENDQXEwQ0NRQ1d1a2paNVY0YVp6QUpC
+Z2NxaGtqT09BUURNRnd4Q3pBSkJnTlZCQVlUQWxWVE1Sa3cKRndZRFZRUUlFeEJYWVhOb2FXNW5k
+Rzl1SUZOMFlYUmxNUkF3RGdZRFZRUUhFd2RUWldGMGRHeGxNU0F3SGdZRApWUVFLRXhkQmJXRjZi
+MjRnVjJWaUlGTmxjblpwWTJWeklFeE1RekFlRncweE1qQXhNRFV4TWpVMk1USmFGdzB6Ck9EQXhN
+RFV4TWpVMk1USmFNRnd4Q3pBSkJnTlZCQVlUQWxWVE1Sa3dGd1lEVlFRSUV4QlhZWE5vYVc1bmRH
+OXUKSUZOMFlYUmxNUkF3RGdZRFZRUUhFd2RUWldGMGRHeGxNU0F3SGdZRFZRUUtFeGRCYldGNmIy
+NGdWMlZpSUZObApjblpwWTJWeklFeE1RekNDQWJjd2dnRXNCZ2NxaGtqT09BUUJNSUlCSHdLQmdR
+Q2prdmNTMmJiMVZRNHl0LzVlCmloNU9PNmtLL24xTHpsbHI3RDhad3RRUDhmT0VwcDVFMm5nK0Q2
+VWQxWjFnWWlwcjU4S2ozbnNzU05wSTZiWDMKVnlJUXpLN3dMY2xuZC9Zb3pxTk5tZ0l5WmVjTjdF
+Z2xLOUlUSEpMUCt4OEZ0VXB0M1FieVlYSmRtVk1lZ042UApodmlZdDVKSC9uWWw0aGgzUGExSEpk
+c2tnUUlWQUxWSjNFUjExK0tvNHRQNm53dkh3aDYrRVJZUkFvR0JBSTFqCmsrdGtxTVZIdUFGY3ZB
+R0tvY1Rnc2pKZW02LzVxb216SnVLRG1iSk51OVF4dzNyQW90WGF1OFFlK01CY0psL1UKaGh5MUtI
+VnBDR2w5ZnVlUTJzNklMMENhTy9idXljVTFDaVlRazQwS05IQ2NIZk5pWmJkbHgxRTlycFVwN2Ju
+RgpsUmEydjFudE1YM2NhUlZEZGJ0UEVXbWR4U0NZc1lGRGs0bVpyT0xCQTRHRUFBS0JnRWJtZXZl
+NWY4TElFL0dmCk1ObVA5Q001ZW92UU9HeDVobzhXcUQrYVRlYnMrazJ0bjkyQkJQcWVacXBXUmE1
+UC8ranJkS21sMXF4NGxsSFcKTVhyczNJZ0liNitoVUlCK1M4ZHo4L21tTzBicHI3NlJvWlZDWFlh
+YjJDWmVkRnV0N3FjM1dVSDkrRVVBSDVtdwp2U2VEQ09VTVlRUjdSOUxJTll3b3VISXppcVFZTUFr
+R0J5cUdTTTQ0QkFNREx3QXdMQUlVV1hCbGs0MHhUd1N3CjdIWDMyTXhYWXJ1c2U5QUNGQk5HbWRY
+MlpCclZOR3JOOU4yZjZST2swazlLCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
+`,
+ }
+
+ certReq.Data = data
+ // test create operation
+ resp, err := b.HandleRequest(certReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("resp: %#v, err: %v", resp, err)
+ }
+
+ certReq.Data = nil
+ // test existence check
+ checkFound, exists, err = b.HandleExistenceCheck(certReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !checkFound {
+ t.Fatal("existence check not found for path 'config/certificate/cert1'")
+ }
+ if !exists {
+ t.Fatal("existence check should have returned 'true' for 'config/certificate/cert1'")
+ }
+
+ certReq.Operation = logical.ReadOperation
+ // test read operation
+ resp, err = b.HandleRequest(certReq)
+ expectedCert := `-----BEGIN CERTIFICATE-----
+MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
+FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
+VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z
+ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
+IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
+cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e
+ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3
+VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P
+hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j
+k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U
+hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF
+lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf
+MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW
+MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw
+vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw
+7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K
+-----END CERTIFICATE-----
+`
+ if resp.Data["aws_public_cert"].(string) != expectedCert {
+ t.Fatalf("bad: expected:%s\n got:%s\n", expectedCert, resp.Data["aws_public_cert"].(string))
+ }
+
+ certReq.Operation = logical.CreateOperation
+ certReq.Path = "config/certificate/cert2"
+ certReq.Data = data
+ // create another entry to test the list operation
+ _, err = b.HandleRequest(certReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ certReq.Operation = logical.ListOperation
+ certReq.Path = "config/certificates"
+ // test list operation
+ resp, err = b.HandleRequest(certReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatalf("failed to list config/certificates")
+ }
+ keys := resp.Data["keys"].([]string)
+ if len(keys) != 2 {
+ t.Fatalf("invalid keys listed: %#v\n", keys)
+ }
+
+ certReq.Operation = logical.DeleteOperation
+ certReq.Path = "config/certificate/cert1"
+ _, err = b.HandleRequest(certReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ certReq.Path = "config/certificate/cert2"
+ _, err = b.HandleRequest(certReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ certReq.Operation = logical.ListOperation
+ certReq.Path = "config/certificates"
+ // test list operation
+ resp, err = b.HandleRequest(certReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatalf("failed to list config/certificates")
+ }
+ if resp.Data["keys"] != nil {
+ t.Fatalf("no entries should be present")
+ }
+}
+
+func TestBackend_parseAndVerifyRoleTagValue(t *testing.T) {
+ // create a backend
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // create a role
+ data := map[string]interface{}{
+ "auth_type": "ec2",
+ "policies": "p,q,r,s",
+ "max_ttl": "120s",
+ "role_tag": "VaultRole",
+ "bound_ami_id": "abcd-123",
+ }
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/abcd-123",
+ Storage: storage,
+ Data: data,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create role")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // verify that the entry is created
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "role/abcd-123",
+ Storage: storage,
+ })
+ if resp == nil {
+ t.Fatalf("expected an role entry for abcd-123")
+ }
+
+ // create a role tag
+ data2 := map[string]interface{}{
+ "policies": "p,q,r,s",
+ }
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "role/abcd-123/tag",
+ Storage: storage,
+ Data: data2,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.Data["tag_key"].(string) == "" ||
+ resp.Data["tag_value"].(string) == "" {
+ t.Fatalf("invalid tag response: %#v\n", resp)
+ }
+ tagValue := resp.Data["tag_value"].(string)
+
+ // parse the value and check if the verifiable values match
+ rTag, err := b.parseAndVerifyRoleTagValue(storage, tagValue)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if rTag == nil {
+ t.Fatalf("failed to parse role tag")
+ }
+ if rTag.Version != "v1" ||
+ !policyutil.EquivalentPolicies(rTag.Policies, []string{"p", "q", "r", "s"}) ||
+ rTag.Role != "abcd-123" {
+ t.Fatalf("bad: parsed role tag contains incorrect values. Got: %#v\n", rTag)
+ }
+}
+
+func TestBackend_PathRoleTag(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := map[string]interface{}{
+ "auth_type": "ec2",
+ "policies": "p,q,r,s",
+ "max_ttl": "120s",
+ "role_tag": "VaultRole",
+ "bound_ami_id": "abcd-123",
+ }
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/abcd-123",
+ Storage: storage,
+ Data: data,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create role")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "role/abcd-123",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatalf("failed to find a role entry for abcd-123")
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "role/abcd-123/tag",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Data == nil {
+ t.Fatalf("failed to create a tag on role: abcd-123")
+ }
+ if resp.IsError() {
+ t.Fatalf("failed to create a tag on role: abcd-123: %s\n", resp.Data["error"])
+ }
+ if resp.Data["tag_value"].(string) == "" {
+ t.Fatalf("role tag not present in the response data: %#v\n", resp.Data)
+ }
+}
+
+func TestBackend_PathBlacklistRoleTag(t *testing.T) {
+ // create the backend
+ storage := &logical.InmemStorage{}
+ config := logical.TestBackendConfig()
+ config.StorageView = storage
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // create a role entry
+ data := map[string]interface{}{
+ "auth_type": "ec2",
+ "policies": "p,q,r,s",
+ "role_tag": "VaultRole",
+ "bound_ami_id": "abcd-123",
+ }
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/abcd-123",
+ Storage: storage,
+ Data: data,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create role")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // create a role tag against a role registered earlier
+ data2 := map[string]interface{}{
+ "policies": "p,q,r,s",
+ }
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "role/abcd-123/tag",
+ Storage: storage,
+ Data: data2,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Data == nil {
+ t.Fatalf("failed to create a tag on role: abcd-123")
+ }
+ if resp.IsError() {
+ t.Fatalf("failed to create a tag on role: abcd-123: %s\n", resp.Data["error"])
+ }
+ tag := resp.Data["tag_value"].(string)
+ if tag == "" {
+ t.Fatalf("role tag not present in the response data: %#v\n", resp.Data)
+ }
+
+ // blacklist that role tag
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roletag-blacklist/" + tag,
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil {
+ t.Fatalf("failed to blacklist the roletag: %s\n", tag)
+ }
+
+ // read the blacklist entry
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "roletag-blacklist/" + tag,
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Data == nil {
+ t.Fatalf("failed to read the blacklisted role tag: %s\n", tag)
+ }
+ if resp.IsError() {
+ t.Fatalf("failed to read the blacklisted role tag:%s. Err: %s\n", tag, resp.Data["error"])
+ }
+
+ // delete the blacklisted entry
+ _, err = b.HandleRequest(&logical.Request{
+ Operation: logical.DeleteOperation,
+ Path: "roletag-blacklist/" + tag,
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // try to read the deleted entry
+ tagEntry, err := b.lockedBlacklistRoleTagEntry(storage, tag)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tagEntry != nil {
+ t.Fatalf("role tag should not have been present: %s\n", tag)
+ }
+}
+
+// This is an acceptance test.
+// Requires the following env vars:
+// TEST_AWS_EC2_PKCS7
+// TEST_AWS_EC2_AMI_ID
+// TEST_AWS_EC2_ACCOUNT_ID
+// TEST_AWS_EC2_IAM_ROLE_ARN
+//
+// If the test is not being run on an EC2 instance that has access to
+// credentials using EC2RoleProvider, then in addition to the above vars,
+// the following need to be set:
+// TEST_AWS_SECRET_KEY
+// TEST_AWS_ACCESS_KEY
+func TestBackendAcc_LoginWithInstanceIdentityDocAndWhitelistIdentity(t *testing.T) {
+ // This test case should be run only when certain env vars are set and
+ // executed as an acceptance test.
+ if os.Getenv(logicaltest.TestEnvVar) == "" {
+ t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar))
+ return
+ }
+
+ pkcs7 := os.Getenv("TEST_AWS_EC2_PKCS7")
+ if pkcs7 == "" {
+ t.Fatalf("env var TEST_AWS_EC2_PKCS7 not set")
+ }
+
+ amiID := os.Getenv("TEST_AWS_EC2_AMI_ID")
+ if amiID == "" {
+ t.Fatalf("env var TEST_AWS_EC2_AMI_ID not set")
+ }
+
+ iamARN := os.Getenv("TEST_AWS_EC2_IAM_ROLE_ARN")
+ if iamARN == "" {
+ t.Fatalf("env var TEST_AWS_EC2_IAM_ROLE_ARN not set")
+ }
+
+ accountID := os.Getenv("TEST_AWS_EC2_ACCOUNT_ID")
+ if accountID == "" {
+ t.Fatalf("env var TEST_AWS_EC2_ACCOUNT_ID not set")
+ }
+
+ roleName := amiID
+
+ // create the backend
+ storage := &logical.InmemStorage{}
+ config := logical.TestBackendConfig()
+ config.StorageView = storage
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ accessKey := os.Getenv("TEST_AWS_ACCESS_KEY")
+ secretKey := os.Getenv("TEST_AWS_SECRET_KEY")
+
+ // In case of problems with making API calls using the credentials (2FA enabled,
+ // for instance), the keys need not be set if the test is running on an EC2
+ // instance with permissions to get the credentials using EC2RoleProvider.
+ if accessKey != "" && secretKey != "" {
+ // get the API credentials from env vars
+ clientConfig := map[string]interface{}{
+ "access_key": accessKey,
+ "secret_key": secretKey,
+ }
+ if clientConfig["access_key"] == "" ||
+ clientConfig["secret_key"] == "" {
+ t.Fatalf("credentials not configured")
+ }
+
+ // store the credentials
+ _, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "config/client",
+ Data: clientConfig,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ loginInput := map[string]interface{}{
+ "pkcs7": pkcs7,
+ "nonce": "vault-client-nonce",
+ }
+
+ // Perform the login operation with an AMI ID that does not match
+ // the one bound to the role.
+ loginRequest := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Storage: storage,
+ Data: loginInput,
+ }
+
+ // Place the wrong AMI ID in the role data.
+ data := map[string]interface{}{
+ "auth_type": "ec2",
+ "policies": "root",
+ "max_ttl": "120s",
+ "bound_ami_id": "wrong_ami_id",
+ "bound_account_id": accountID,
+ "bound_iam_role_arn": iamARN,
+ }
+
+ roleReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/" + roleName,
+ Storage: storage,
+ Data: data,
+ }
+
+ // Save the role with wrong AMI ID
+ resp, err := b.HandleRequest(roleReq)
+ if err != nil && (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
+ }
+
+ // Expect failure when trying to log in with the wrong AMI ID
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
+ t.Fatalf("bad: expected error response: resp:%#v\nerr:%v", resp, err)
+ }
+
+ // Place the correct AMI ID, but make the AccountID wrong
+ roleReq.Operation = logical.UpdateOperation
+ data["bound_ami_id"] = amiID
+ data["bound_account_id"] = "wrong-account-id"
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err)
+ }
+
+ // Expect failure when trying to log in with the incorrect AccountID
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
+ t.Fatalf("bad: expected error response: resp:%#v\nerr:%v", resp, err)
+ }
+
+ // Place the correct AccountID, but make the IAMRoleARN wrong
+ data["bound_account_id"] = accountID
+ data["bound_iam_role_arn"] = "wrong_iam_role_arn"
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err)
+ }
+
+ // Attempt to log in and expect failure because the IAM role ARN is wrong
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
+ t.Fatalf("bad: expected error response: resp:%#v\nerr:%v", resp, err)
+ }
+
+ // place the correct IAM role ARN
+ data["bound_iam_role_arn"] = iamARN
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err)
+ }
+
+ // Now, the login attempt should succeed
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Auth == nil || resp.IsError() {
+ t.Fatalf("bad: failed to login: resp:%#v\nerr:%v", resp, err)
+ }
+
+ // verify the presence of instance_id in the response object.
+ instanceID := resp.Auth.Metadata["instance_id"]
+ if instanceID == "" {
+ t.Fatalf("instance ID not present in the response object")
+ }
+
+ loginInput["nonce"] = "changed-vault-client-nonce"
+ // try to log in again with the changed nonce
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("login attempt should have failed due to client nonce mismatch")
+ }
+
+ // Check if a whitelist identity entry is created after the login.
+ wlRequest := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "identity-whitelist/" + instanceID,
+ Storage: storage,
+ }
+ resp, err = b.HandleRequest(wlRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Data == nil || resp.Data["role"] != roleName {
+ t.Fatalf("failed to read whitelist identity")
+ }
+
+ // Delete the whitelist identity entry.
+ wlRequest.Operation = logical.DeleteOperation
+ resp, err = b.HandleRequest(wlRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.IsError() {
+ t.Fatalf("failed to delete whitelist identity")
+ }
+
+ // Allow a fresh login.
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Auth == nil || resp.IsError() {
+ t.Fatalf("login attempt failed")
+ }
+}
+
+func TestBackend_pathStsConfig(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ stsReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Storage: storage,
+ Path: "config/sts/account1",
+ }
+ checkFound, exists, err := b.HandleExistenceCheck(stsReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !checkFound {
+ t.Fatal("existence check not found for path 'config/sts/account1'")
+ }
+ if exists {
+ t.Fatal("existence check should have returned 'false' for 'config/sts/account1'")
+ }
+
+ data := map[string]interface{}{
+ "sts_role": "arn:aws:iam:account1:role/myRole",
+ }
+
+ stsReq.Data = data
+ // test create operation
+ resp, err := b.HandleRequest(stsReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("resp: %#v, err: %v", resp, err)
+ }
+
+ stsReq.Data = nil
+ // test existence check
+ checkFound, exists, err = b.HandleExistenceCheck(stsReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !checkFound {
+ t.Fatal("existence check not found for path 'config/sts/account1'")
+ }
+ if !exists {
+ t.Fatal("existence check should have returned 'true' for 'config/sts/account1'")
+ }
+
+ stsReq.Operation = logical.ReadOperation
+ // test read operation
+ resp, err = b.HandleRequest(stsReq)
+ expectedStsRole := "arn:aws:iam:account1:role/myRole"
+ if resp.Data["sts_role"].(string) != expectedStsRole {
+ t.Fatalf("bad: expected:%s\n got:%s\n", expectedStsRole, resp.Data["sts_role"].(string))
+ }
+
+ stsReq.Operation = logical.CreateOperation
+ stsReq.Path = "config/sts/account2"
+ stsReq.Data = data
+ // create another entry to test the list operation
+ resp, err = b.HandleRequest(stsReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatal(err)
+ }
+
+ stsReq.Operation = logical.ListOperation
+ stsReq.Path = "config/sts"
+ // test list operation
+ resp, err = b.HandleRequest(stsReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatalf("failed to list config/sts")
+ }
+ keys := resp.Data["keys"].([]string)
+ if len(keys) != 2 {
+ t.Fatalf("invalid keys listed: %#v\n", keys)
+ }
+
+ stsReq.Operation = logical.DeleteOperation
+ stsReq.Path = "config/sts/account1"
+ resp, err = b.HandleRequest(stsReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatal(err)
+ }
+
+ stsReq.Path = "config/sts/account2"
+ resp, err = b.HandleRequest(stsReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatal(err)
+ }
+
+ stsReq.Operation = logical.ListOperation
+ stsReq.Path = "config/sts"
+ // test list operation
+ resp, err = b.HandleRequest(stsReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatalf("failed to list config/sts")
+ }
+ if resp.Data["keys"] != nil {
+ t.Fatalf("no entries should be present")
+ }
+}
+
+func buildCallerIdentityLoginData(request *http.Request, roleName string) (map[string]interface{}, error) {
+ headersJson, err := json.Marshal(request.Header)
+ if err != nil {
+ return nil, err
+ }
+ requestBody, err := ioutil.ReadAll(request.Body)
+ if err != nil {
+ return nil, err
+ }
+ return map[string]interface{}{
+ "iam_http_request_method": request.Method,
+ "iam_request_url": base64.StdEncoding.EncodeToString([]byte(request.URL.String())),
+ "iam_request_headers": base64.StdEncoding.EncodeToString(headersJson),
+ "iam_request_body": base64.StdEncoding.EncodeToString(requestBody),
+ "request_role": roleName,
+ }, nil
+}
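+
+// A hedged usage sketch of the helper above, mirroring what the acceptance
+// test below does: sign an STS GetCallerIdentity request and convert it into
+// the login payload. stsService and the role name are assumptions here.
+//
+//	stsRequest, _ := stsService.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{})
+//	stsRequest.Sign()
+//	loginData, err := buildCallerIdentityLoginData(stsRequest.HTTPRequest, "valid-role")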
+
+// This is an acceptance test.
+// If the test is NOT being run on an AWS EC2 instance in an instance profile,
+// it requires the following environment variables to be set:
+// TEST_AWS_ACCESS_KEY_ID
+// TEST_AWS_SECRET_ACCESS_KEY
+// TEST_AWS_SECURITY_TOKEN or TEST_AWS_SESSION_TOKEN (optional, if you are using short-lived creds)
+// These are intentionally NOT the "standard" variables to prevent accidentally
+// using prod creds in acceptance tests
+func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) {
+ // This test case should be run only when certain env vars are set and
+ // executed as an acceptance test.
+ if os.Getenv(logicaltest.TestEnvVar) == "" {
+ t.Skipf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar)
+ }
+
+ storage := &logical.InmemStorage{}
+ config := logical.TestBackendConfig()
+ config.StorageView = storage
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Override the default AWS env vars (if set) with our test creds
+ // so that the credential provider chain will pick them up
+ // NOTE that I'm not bothering to override the shared config file location,
+ // so if creds are specified there, they will be used before IAM
+ // instance profile creds
+ // This doesn't provide perfect leakage protection (e.g., it will still
+ // potentially pick up credentials from the ~/.config files), but it's
+ // probably good enough rather than having to muck around in the low-level details
+ for _, envvar := range []string{
+ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SECURITY_TOKEN", "AWS_SESSION_TOKEN"} {
+ // restore existing environment variables (in case future tests need them)
+ defer os.Setenv(envvar, os.Getenv(envvar))
+ os.Setenv(envvar, os.Getenv("TEST_"+envvar))
+ }
+ awsSession, err := session.NewSession()
+ if err != nil {
+ t.Fatalf("failed to create session: %v", err)
+ }
+
+ stsService := sts.New(awsSession)
+ stsInputParams := &sts.GetCallerIdentityInput{}
+
+ testIdentity, err := stsService.GetCallerIdentity(stsInputParams)
+ if err != nil {
+ t.Fatalf("Received error retrieving identity: %s", err)
+ }
+ testIdentityArn, _, _, err := parseIamArn(*testIdentity.Arn)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Test setup largely done
+ // At this point, we're going to:
+ // 1. Configure the client to require our test header value
+ // 2. Configure two different roles:
+ // a. One bound to our test user
+ // b. One bound to a garbage ARN
+ // 3. Pass in a request that doesn't have the signed header, ensure
+ // we're not allowed to login
+ // 4. Pass in a request that has a validly signed header, but the wrong
+ // value, ensure it doesn't allow login
+ // 5. Pass in a request that has a validly signed request, ensure
+ // it allows us to login to our role
+ // 6. Pass in a request that has a validly signed request, asking for
+ // the other role, ensure it fails
+ const testVaultHeaderValue = "VaultAcceptanceTesting"
+ const testValidRoleName = "valid-role"
+ const testInvalidRoleName = "invalid-role"
+
+ clientConfigData := map[string]interface{}{
+ "iam_server_id_header_value": testVaultHeaderValue,
+ }
+ clientRequest := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/client",
+ Storage: storage,
+ Data: clientConfigData,
+ }
+ _, err = b.HandleRequest(clientRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // configuring the valid role we'll be able to login to
+ roleData := map[string]interface{}{
+ "bound_iam_principal_arn": testIdentityArn,
+ "policies": "root",
+ "auth_type": iamAuthType,
+ }
+ roleRequest := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/" + testValidRoleName,
+ Storage: storage,
+ Data: roleData,
+ }
+ resp, err := b.HandleRequest(roleRequest)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err)
+ }
+
+ // configuring a valid role we won't be able to login to
+ roleDataEc2 := map[string]interface{}{
+ "auth_type": "ec2",
+ "policies": "root",
+ "bound_ami_id": "ami-1234567",
+ }
+ roleRequestEc2 := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/ec2only",
+ Storage: storage,
+ Data: roleDataEc2,
+ }
+ resp, err = b.HandleRequest(roleRequestEc2)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: failed to create role; resp:%#v\nerr:%v", resp, err)
+ }
+
+ // now we're creating the invalid role we won't be able to login to
+ roleData["bound_iam_principal_arn"] = "arn:aws:iam::123456789012:role/FakeRole"
+ roleRequest.Path = "role/" + testInvalidRoleName
+ resp, err = b.HandleRequest(roleRequest)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: didn't fail to create role: resp:%#v\nerr:%v", resp, err)
+ }
+
+ // now, create the request without the signed header
+ stsRequestNoHeader, _ := stsService.GetCallerIdentityRequest(stsInputParams)
+ stsRequestNoHeader.Sign()
+ loginData, err := buildCallerIdentityLoginData(stsRequestNoHeader.HTTPRequest, testValidRoleName)
+ if err != nil {
+ t.Fatal(err)
+ }
+ loginRequest := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Storage: storage,
+ Data: loginData,
+ }
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil || resp == nil || !resp.IsError() {
+ t.Errorf("bad: expected failed login due to missing header: resp:%#v\nerr:%v", resp, err)
+ }
+
+ // create the request with the invalid header value
+
+ // Not reusing stsRequestNoHeader because the process of signing the request
+ // and reading the body modifies the underlying request, so it's just cleaner
+ // to get new requests.
+ stsRequestInvalidHeader, _ := stsService.GetCallerIdentityRequest(stsInputParams)
+ stsRequestInvalidHeader.HTTPRequest.Header.Add(iamServerIdHeader, "InvalidValue")
+ stsRequestInvalidHeader.Sign()
+ loginData, err = buildCallerIdentityLoginData(stsRequestInvalidHeader.HTTPRequest, testValidRoleName)
+ if err != nil {
+ t.Fatal(err)
+ }
+ loginRequest = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Storage: storage,
+ Data: loginData,
+ }
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil || resp == nil || !resp.IsError() {
+ t.Errorf("bad: expected failed login due to invalid header: resp:%#v\nerr:%v", resp, err)
+ }
+
+ // Now, valid request against invalid role
+ stsRequestValid, _ := stsService.GetCallerIdentityRequest(stsInputParams)
+ stsRequestValid.HTTPRequest.Header.Add(iamServerIdHeader, testVaultHeaderValue)
+ stsRequestValid.Sign()
+ loginData, err = buildCallerIdentityLoginData(stsRequestValid.HTTPRequest, testInvalidRoleName)
+ if err != nil {
+ t.Fatal(err)
+ }
+ loginRequest = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Storage: storage,
+ Data: loginData,
+ }
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil || resp == nil || !resp.IsError() {
+ t.Errorf("bad: expected failed login due to invalid role: resp:%#v\nerr:%v", resp, err)
+ }
+
+ loginData["role"] = "ec2only"
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil || resp == nil || !resp.IsError() {
+ t.Errorf("bad: expected failed login due to bad auth type: resp:%#v\nerr:%v", resp, err)
+ }
+
+ // finally, the happy path tests :)
+
+ loginData["role"] = testValidRoleName
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Auth == nil || resp.IsError() {
+ t.Errorf("bad: expected valid login: resp:%#v", resp)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go
new file mode 100644
index 0000000..c69187f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go
@@ -0,0 +1,129 @@
+package awsauth
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/sts"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/awsutil"
+)
+
+type CLIHandler struct{}
+
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+ mount, ok := m["mount"]
+ if !ok {
+ mount = "aws"
+ }
+
+ role, ok := m["role"]
+ if !ok {
+ role = ""
+ }
+
+ headerValue, ok := m["header_value"]
+ if !ok {
+ headerValue = ""
+ }
+
+ // Grab any supplied credentials off the command line
+ // Ensure we're able to fall back to the SDK default credential providers
+ credConfig := &awsutil.CredentialsConfig{
+ AccessKey: m["aws_access_key_id"],
+ SecretKey: m["aws_secret_access_key"],
+ SessionToken: m["aws_security_token"],
+ }
+ creds, err := credConfig.GenerateCredentialChain()
+ if err != nil {
+ return "", err
+ }
+ if creds == nil {
+ return "", fmt.Errorf("could not compile valid credential providers from static config, environemnt, shared, or instance metadata")
+ }
+
+ // Use the credentials we've found to construct an STS session
+ stsSession, err := session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{Credentials: creds},
+ })
+ if err != nil {
+ return "", err
+ }
+
+ var params *sts.GetCallerIdentityInput
+ svc := sts.New(stsSession)
+ stsRequest, _ := svc.GetCallerIdentityRequest(params)
+
+ // Inject the required auth header value, if supplied, and then sign the request including that header
+ if headerValue != "" {
+ stsRequest.HTTPRequest.Header.Add(iamServerIdHeader, headerValue)
+ }
+ stsRequest.Sign()
+
+ // Now extract out the relevant parts of the request
+ headersJson, err := json.Marshal(stsRequest.HTTPRequest.Header)
+ if err != nil {
+ return "", err
+ }
+ requestBody, err := ioutil.ReadAll(stsRequest.HTTPRequest.Body)
+ if err != nil {
+ return "", err
+ }
+ method := stsRequest.HTTPRequest.Method
+ targetUrl := base64.StdEncoding.EncodeToString([]byte(stsRequest.HTTPRequest.URL.String()))
+ headers := base64.StdEncoding.EncodeToString(headersJson)
+ body := base64.StdEncoding.EncodeToString(requestBody)
+
+ // And pass them on to the Vault server
+ path := fmt.Sprintf("auth/%s/login", mount)
+ secret, err := c.Logical().Write(path, map[string]interface{}{
+ "iam_http_request_method": method,
+ "iam_request_url": targetUrl,
+ "iam_request_headers": headers,
+ "iam_request_body": body,
+ "role": role,
+ })
+
+ if err != nil {
+ return "", err
+ }
+ if secret == nil {
+ return "", fmt.Errorf("empty response from credential provider")
+ }
+
+ return secret.Auth.ClientToken, nil
+}
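+
+// A minimal, hypothetical sketch of driving this handler programmatically;
+// the mount and role values below are assumptions, and any reachable Vault
+// server with the AWS backend mounted would do:
+//
+//	client, err := api.NewClient(api.DefaultConfig())
+//	if err != nil {
+//		// handle error
+//	}
+//	token, err := (&CLIHandler{}).Auth(client, map[string]string{
+//		"mount": "aws",
+//		"role":  "dev-role",
+//	})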
+
+func (h *CLIHandler) Help() string {
+ help := `
+The AWS credential provider allows you to authenticate with
+AWS IAM credentials. To use it, you specify valid AWS IAM credentials
+in one of a number of ways. They can be specified explicitly on the
+command line (which in general you should not do), via the standard AWS
+environment variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and
+AWS_SECURITY_TOKEN), via the ~/.aws/credentials file, or via an EC2
+instance profile (in that order).
+
+ Example: vault auth -method=aws
+
+If you need to explicitly pass in credentials, you would do it like this:
+ Example: vault auth -method=aws aws_access_key_id= aws_secret_access_key= aws_security_token=
+
+Key/Value Pairs:
+
+ mount=aws The mountpoint for the AWS credential provider.
+ Defaults to "aws"
+ aws_access_key_id= Explicitly specified AWS access key
+ aws_secret_access_key= Explicitly specified AWS secret key
+ aws_security_token= Security token for temporary credentials
+ header_value The value of the X-Vault-AWS-IAM-Server-ID header.
+ role The name of the role you're requesting a token for
+ `
+
+ return strings.TrimSpace(help)
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go
new file mode 100644
index 0000000..1647f45
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go
@@ -0,0 +1,213 @@
+package awsauth
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/aws/aws-sdk-go/service/iam"
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/helper/awsutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+// getRawClientConfig creates an aws-sdk-go config, which is used to create a client
+// that can interact with the AWS API. This builds credentials in the following
+// order of preference:
+//
+// * Static credentials from 'config/client'
+// * Environment variables
+// * Instance metadata role
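+//
+// A hypothetical call, for illustration only (the region and clientType
+// values are assumptions):
+//
+//	cfg, err := b.getRawClientConfig(s, "us-east-1", "ec2")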
+func (b *backend) getRawClientConfig(s logical.Storage, region, clientType string) (*aws.Config, error) {
+ credsConfig := &awsutil.CredentialsConfig{
+ Region: region,
+ }
+
+ // Read the configured secret key and access key
+ config, err := b.nonLockedClientConfigEntry(s)
+ if err != nil {
+ return nil, err
+ }
+
+ endpoint := aws.String("")
+ if config != nil {
+ // Override the default endpoint with the configured endpoint.
+ switch {
+ case clientType == "ec2" && config.Endpoint != "":
+ endpoint = aws.String(config.Endpoint)
+ case clientType == "iam" && config.IAMEndpoint != "":
+ endpoint = aws.String(config.IAMEndpoint)
+ case clientType == "sts" && config.STSEndpoint != "":
+ endpoint = aws.String(config.STSEndpoint)
+ }
+
+ credsConfig.AccessKey = config.AccessKey
+ credsConfig.SecretKey = config.SecretKey
+ }
+
+ credsConfig.HTTPClient = cleanhttp.DefaultClient()
+
+ creds, err := credsConfig.GenerateCredentialChain()
+ if err != nil {
+ return nil, err
+ }
+ if creds == nil {
+ return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata")
+ }
+
+ // Create a config that can be used to make the API calls.
+ return &aws.Config{
+ Credentials: creds,
+ Region: aws.String(region),
+ HTTPClient: cleanhttp.DefaultClient(),
+ Endpoint: endpoint,
+ }, nil
+}
+
+// getClientConfig returns an aws-sdk-go config, with optionally assumed credentials.
+// It uses getRawClientConfig to obtain config for the runtime environment, and if
+// stsRole is a non-empty string, it will use AssumeRole to obtain a set of assumed
+// credentials. The credentials will expire after 15 minutes but will auto-refresh.
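+//
+// A hedged example (the role ARN below is a made-up assumption):
+//
+//	cfg, err := b.getClientConfig(s, "us-east-1", "arn:aws:iam::123456789012:role/VaultAuth", "ec2")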
+func (b *backend) getClientConfig(s logical.Storage, region, stsRole, clientType string) (*aws.Config, error) {
+
+ config, err := b.getRawClientConfig(s, region, clientType)
+ if err != nil {
+ return nil, err
+ }
+ if config == nil {
+ return nil, fmt.Errorf("could not compile valid credentials through the default provider chain")
+ }
+
+ if stsRole != "" {
+ assumeRoleConfig, err := b.getRawClientConfig(s, region, "sts")
+ if err != nil {
+ return nil, err
+ }
+ if assumeRoleConfig == nil {
+ return nil, fmt.Errorf("could not configure STS client")
+ }
+ assumedCredentials := stscreds.NewCredentials(session.New(assumeRoleConfig), stsRole)
+ // Test that we actually have permissions to assume the role
+ if _, err = assumedCredentials.Get(); err != nil {
+ return nil, err
+ }
+ config.Credentials = assumedCredentials
+ }
+
+ return config, nil
+}
+
+// flushCachedEC2Clients deletes all the cached ec2 client objects from the backend.
+// If the client credentials configuration is deleted or updated in the backend, all
+// the cached EC2 client objects will be flushed. Config mutex lock should be
+// acquired for write operation before calling this method.
+func (b *backend) flushCachedEC2Clients() {
+ // deleting items in map during iteration is safe
+ for region := range b.EC2ClientsMap {
+ delete(b.EC2ClientsMap, region)
+ }
+}
+
+// flushCachedIAMClients deletes all the cached iam client objects from the
+// backend. If the client credentials configuration is deleted or updated in
+// the backend, all the cached IAM client objects will be flushed. Config mutex
+// lock should be acquired for write operation before calling this method.
+func (b *backend) flushCachedIAMClients() {
+ // deleting items in map during iteration is safe
+ for region := range b.IAMClientsMap {
+ delete(b.IAMClientsMap, region)
+ }
+}
+
+// clientEC2 creates a client to interact with AWS EC2 API
+func (b *backend) clientEC2(s logical.Storage, region string, stsRole string) (*ec2.EC2, error) {
+ b.configMutex.RLock()
+ if b.EC2ClientsMap[region] != nil && b.EC2ClientsMap[region][stsRole] != nil {
+ defer b.configMutex.RUnlock()
+ // If the client object was already created, return it
+ return b.EC2ClientsMap[region][stsRole], nil
+ }
+
+ // Release the read lock and acquire the write lock
+ b.configMutex.RUnlock()
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ // If the client gets created while switching the locks, return it
+ if b.EC2ClientsMap[region] != nil && b.EC2ClientsMap[region][stsRole] != nil {
+ return b.EC2ClientsMap[region][stsRole], nil
+ }
+
+ // Create an AWS config object using a chain of providers
+ awsConfig, err := b.getClientConfig(s, region, stsRole, "ec2")
+ if err != nil {
+ return nil, err
+ }
+
+ if awsConfig == nil {
+ return nil, fmt.Errorf("could not retrieve valid assumed credentials")
+ }
+
+ // Create a new EC2 client object, cache it and return the same
+ client := ec2.New(session.New(awsConfig))
+ if client == nil {
+ return nil, fmt.Errorf("could not obtain ec2 client")
+ }
+ if _, ok := b.EC2ClientsMap[region]; !ok {
+ b.EC2ClientsMap[region] = map[string]*ec2.EC2{stsRole: client}
+ } else {
+ b.EC2ClientsMap[region][stsRole] = client
+ }
+
+ return b.EC2ClientsMap[region][stsRole], nil
+}
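+
+// A hypothetical call site for clientEC2; storage and region would come from
+// the incoming request, and an empty stsRole means no role assumption:
+//
+//	ec2Client, err := b.clientEC2(req.Storage, "us-east-1", "")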
+
+// clientIAM creates a client to interact with AWS IAM API
+func (b *backend) clientIAM(s logical.Storage, region string, stsRole string) (*iam.IAM, error) {
+ b.configMutex.RLock()
+ if b.IAMClientsMap[region] != nil && b.IAMClientsMap[region][stsRole] != nil {
+ defer b.configMutex.RUnlock()
+ // If the client object was already created, return it
+ return b.IAMClientsMap[region][stsRole], nil
+ }
+
+ // Release the read lock and acquire the write lock
+ b.configMutex.RUnlock()
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ // If the client gets created while switching the locks, return it
+ if b.IAMClientsMap[region] != nil && b.IAMClientsMap[region][stsRole] != nil {
+ return b.IAMClientsMap[region][stsRole], nil
+ }
+
+ // Create an AWS config object using a chain of providers
+ awsConfig, err := b.getClientConfig(s, region, stsRole, "iam")
+ if err != nil {
+ return nil, err
+ }
+
+ if awsConfig == nil {
+ return nil, fmt.Errorf("could not retrieve valid assumed credentials")
+ }
+
+ // Create a new IAM client object, cache it and return the same
+ client := iam.New(session.New(awsConfig))
+ if client == nil {
+ return nil, fmt.Errorf("could not obtain iam client")
+ }
+ if _, ok := b.IAMClientsMap[region]; !ok {
+ b.IAMClientsMap[region] = map[string]*iam.IAM{stsRole: client}
+ } else {
+ b.IAMClientsMap[region][stsRole] = client
+ }
+ return b.IAMClientsMap[region][stsRole], nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_certificate.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_certificate.go
new file mode 100644
index 0000000..0c026ed
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_certificate.go
@@ -0,0 +1,447 @@
+package awsauth
+
+import (
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "fmt"
+ "math/big"
+ "strings"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// dsaSignature represents the contents of the signature of a signed
+// content using digital signature algorithm.
+type dsaSignature struct {
+ R, S *big.Int
+}
+
+// This certificate is used to verify the PKCS#7 signature of the instance
+// identity document. As per AWS documentation, this public key is valid for
+// US East (N. Virginia), US West (Oregon), US West (N. California), EU
+// (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia
+// Pacific (Singapore), Asia Pacific (Sydney), and South America (Sao Paulo).
+//
+// It's also the same certificate, but for some reason listed separately, for
+// GovCloud (US)
+const genericAWSPublicCertificatePkcs7 = `-----BEGIN CERTIFICATE-----
+MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
+FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
+VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z
+ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
+IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
+cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e
+ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3
+VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P
+hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j
+k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U
+hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF
+lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf
+MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW
+MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw
+vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw
+7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K
+-----END CERTIFICATE-----
+`
+
+// This certificate is used to verify the instance identity document using the
+// RSA digest of the same
+const genericAWSPublicCertificateIdentity = `-----BEGIN CERTIFICATE-----
+MIIDIjCCAougAwIBAgIJAKnL4UEDMN/FMA0GCSqGSIb3DQEBBQUAMGoxCzAJBgNV
+BAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdTZWF0dGxlMRgw
+FgYDVQQKEw9BbWF6b24uY29tIEluYy4xGjAYBgNVBAMTEWVjMi5hbWF6b25hd3Mu
+Y29tMB4XDTE0MDYwNTE0MjgwMloXDTI0MDYwNTE0MjgwMlowajELMAkGA1UEBhMC
+VVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1NlYXR0bGUxGDAWBgNV
+BAoTD0FtYXpvbi5jb20gSW5jLjEaMBgGA1UEAxMRZWMyLmFtYXpvbmF3cy5jb20w
+gZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAIe9GN//SRK2knbjySG0ho3yqQM3
+e2TDhWO8D2e8+XZqck754gFSo99AbT2RmXClambI7xsYHZFapbELC4H91ycihvrD
+jbST1ZjkLQgga0NE1q43eS68ZeTDccScXQSNivSlzJZS8HJZjgqzBlXjZftjtdJL
+XeE4hwvo0sD4f3j9AgMBAAGjgc8wgcwwHQYDVR0OBBYEFCXWzAgVyrbwnFncFFIs
+77VBdlE4MIGcBgNVHSMEgZQwgZGAFCXWzAgVyrbwnFncFFIs77VBdlE4oW6kbDBq
+MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHU2Vh
+dHRsZTEYMBYGA1UEChMPQW1hem9uLmNvbSBJbmMuMRowGAYDVQQDExFlYzIuYW1h
+em9uYXdzLmNvbYIJAKnL4UEDMN/FMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
+BQADgYEAFYcz1OgEhQBXIwIdsgCOS8vEtiJYF+j9uO6jz7VOmJqO+pRlAbRlvY8T
+C1haGgSI/A1uZUKs/Zfnph0oEI0/hu1IIJ/SKBDtN5lvmZ/IzbOPIJWirlsllQIQ
+7zvWbGd9c9+Rm3p04oTvhup99la7kZqevJK0QRdD/6NpCKsqP/0=
+-----END CERTIFICATE-----`
+
+// pathListCertificates creates a path that enables listing of all
+// the AWS public certificates registered with Vault.
+func pathListCertificates(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/certificates/?",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathCertificatesList,
+ },
+
+ HelpSynopsis: pathListCertificatesHelpSyn,
+ HelpDescription: pathListCertificatesHelpDesc,
+ }
+}
+
+func pathConfigCertificate(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/certificate/" + framework.GenericNameRegex("cert_name"),
+ Fields: map[string]*framework.FieldSchema{
+ "cert_name": {
+ Type: framework.TypeString,
+ Description: "Name of the certificate.",
+ },
+ "aws_public_cert": {
+ Type: framework.TypeString,
+ Description: "AWS Public cert required to verify PKCS7 signature of the EC2 instance metadata.",
+ },
+ "type": {
+ Type: framework.TypeString,
+ Default: "pkcs7",
+ Description: `
+Takes the value of either "pkcs7" or "identity", indicating the type of
+document which can be verified using the given certificate. The reason is that
+the PKCS#7 document will have a DSA digest and the identity signature will have
+an RSA signature, and accordingly the public certificates to verify those also
+vary. Defaults to "pkcs7".`,
+ },
+ },
+
+ ExistenceCheck: b.pathConfigCertificateExistenceCheck,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.CreateOperation: b.pathConfigCertificateCreateUpdate,
+ logical.UpdateOperation: b.pathConfigCertificateCreateUpdate,
+ logical.ReadOperation: b.pathConfigCertificateRead,
+ logical.DeleteOperation: b.pathConfigCertificateDelete,
+ },
+
+ HelpSynopsis: pathConfigCertificateSyn,
+ HelpDescription: pathConfigCertificateDesc,
+ }
+}
+
+// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
+// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
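+//
+// For example (a hypothetical request flow):
+//
+//	exists, err := b.pathConfigCertificateExistenceCheck(req, data)
+//	// exists == true  -> the framework routes the request to UpdateOperation
+//	// exists == false -> the framework routes the request to CreateOperation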
+func (b *backend) pathConfigCertificateExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
+ certName := data.Get("cert_name").(string)
+ if certName == "" {
+ return false, fmt.Errorf("missing cert_name")
+ }
+
+ entry, err := b.lockedAWSPublicCertificateEntry(req.Storage, certName)
+ if err != nil {
+ return false, err
+ }
+ return entry != nil, nil
+}
+
+// pathCertificatesList is used to list all the AWS public certificates registered with Vault
+func (b *backend) pathCertificatesList(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.configMutex.RLock()
+ defer b.configMutex.RUnlock()
+
+ certs, err := req.Storage.List("config/certificate/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(certs), nil
+}
+
+// Decodes the PEM encoded certificate and parses it into an x509 cert
+func decodePEMAndParseCertificate(certificate string) (*x509.Certificate, error) {
+ // Decode the PEM block and error out if a block is not detected in the first attempt
+ decodedPublicCert, rest := pem.Decode([]byte(certificate))
+ if len(rest) != 0 {
+ return nil, fmt.Errorf("invalid certificate; should be one PEM block only")
+ }
+
+ // Check if the certificate can be parsed
+ publicCert, err := x509.ParseCertificate(decodedPublicCert.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ if publicCert == nil {
+ return nil, fmt.Errorf("invalid certificate; failed to parse certificate")
+ }
+ return publicCert, nil
+}
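+
+// A hypothetical example: the bundled default identity certificate above
+// parses cleanly.
+//
+//	cert, err := decodePEMAndParseCertificate(genericAWSPublicCertificateIdentity)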
+
+// awsPublicCertificates returns a slice of all the parsed AWS public
+// certificates, which are used to verify either the SHA256 RSA signature, or
+// the PKCS7 signatures of the instance identity documents. This method will
+// append the certificates registered using `config/certificate/`
+// endpoint, along with the default certificate in the backend.
+func (b *backend) awsPublicCertificates(s logical.Storage, isPkcs bool) ([]*x509.Certificate, error) {
+ // Lock at beginning and use internal method so that we are consistent as
+ // we iterate through
+ b.configMutex.RLock()
+ defer b.configMutex.RUnlock()
+
+ var certs []*x509.Certificate
+
+ defaultCert := genericAWSPublicCertificateIdentity
+ if isPkcs {
+ defaultCert = genericAWSPublicCertificatePkcs7
+ }
+
+ // Append the generic certificate provided in the AWS EC2 instance metadata documentation
+ decodedCert, err := decodePEMAndParseCertificate(defaultCert)
+ if err != nil {
+ return nil, err
+ }
+ certs = append(certs, decodedCert)
+
+ // Get the list of all the registered certificates
+ registeredCerts, err := s.List("config/certificate/")
+ if err != nil {
+ return nil, err
+ }
+
+ // Iterate through each certificate, parse and append it to a slice
+ for _, cert := range registeredCerts {
+ certEntry, err := b.nonLockedAWSPublicCertificateEntry(s, cert)
+ if err != nil {
+ return nil, err
+ }
+ if certEntry == nil {
+ return nil, fmt.Errorf("certificate storage has a nil entry under the name:%s\n", cert)
+ }
+ // Append relevant certificates only
+ if (isPkcs && certEntry.Type == "pkcs7") ||
+ (!isPkcs && certEntry.Type == "identity") {
+ decodedCert, err := decodePEMAndParseCertificate(certEntry.AWSPublicCert)
+ if err != nil {
+ return nil, err
+ }
+ certs = append(certs, decodedCert)
+ }
+ }
+
+ return certs, nil
+}
+
+// lockedSetAWSPublicCertificateEntry is used to store the AWS public key in
+// the storage. This method acquires lock before creating or updating a storage
+// entry.
+func (b *backend) lockedSetAWSPublicCertificateEntry(s logical.Storage, certName string, certEntry *awsPublicCert) error {
+ if certName == "" {
+ return fmt.Errorf("missing certificate name")
+ }
+
+ if certEntry == nil {
+ return fmt.Errorf("nil AWS public key certificate")
+ }
+
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ return b.nonLockedSetAWSPublicCertificateEntry(s, certName, certEntry)
+}
+
+// nonLockedSetAWSPublicCertificateEntry is used to store the AWS public key in
+// the storage. This method does not acquire lock before reading the storage.
+// If locking is desired, use lockedSetAWSPublicCertificateEntry instead.
+func (b *backend) nonLockedSetAWSPublicCertificateEntry(s logical.Storage, certName string, certEntry *awsPublicCert) error {
+ if certName == "" {
+ return fmt.Errorf("missing certificate name")
+ }
+
+ if certEntry == nil {
+ return fmt.Errorf("nil AWS public key certificate")
+ }
+
+ entry, err := logical.StorageEntryJSON("config/certificate/"+certName, certEntry)
+ if err != nil {
+ return err
+ }
+ if entry == nil {
+ return fmt.Errorf("failed to create storage entry for AWS public key certificate")
+ }
+
+ return s.Put(entry)
+}
+
+// lockedAWSPublicCertificateEntry is used to get the configured AWS Public Key
+// that is used to verify the PKCS#7 signature of the instance identity
+// document.
+func (b *backend) lockedAWSPublicCertificateEntry(s logical.Storage, certName string) (*awsPublicCert, error) {
+ b.configMutex.RLock()
+ defer b.configMutex.RUnlock()
+
+ return b.nonLockedAWSPublicCertificateEntry(s, certName)
+}
+
+// nonLockedAWSPublicCertificateEntry reads the certificate information from
+// the storage. This method does not acquire lock before reading the storage.
+// If locking is desired, use lockedAWSPublicCertificateEntry instead.
+func (b *backend) nonLockedAWSPublicCertificateEntry(s logical.Storage, certName string) (*awsPublicCert, error) {
+ entry, err := s.Get("config/certificate/" + certName)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+ var certEntry awsPublicCert
+ if err := entry.DecodeJSON(&certEntry); err != nil {
+ return nil, err
+ }
+
+ // Handle upgrade for certificate type
+ persistNeeded := false
+ if certEntry.Type == "" {
+ certEntry.Type = "pkcs7"
+ persistNeeded = true
+ }
+
+ if persistNeeded {
+ if err := b.nonLockedSetAWSPublicCertificateEntry(s, certName, &certEntry); err != nil {
+ return nil, err
+ }
+ }
+
+ return &certEntry, nil
+}
+
+// pathConfigCertificateDelete is used to delete the previously configured AWS
+// Public Key that is used to verify the PKCS#7 signature of the instance
+// identity document.
+func (b *backend) pathConfigCertificateDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ certName := data.Get("cert_name").(string)
+ if certName == "" {
+ return logical.ErrorResponse("missing cert_name"), nil
+ }
+
+ return nil, req.Storage.Delete("config/certificate/" + certName)
+}
+
+// pathConfigCertificateRead is used to view the configured AWS Public Key that
+// is used to verify the PKCS#7 signature of the instance identity document.
+func (b *backend) pathConfigCertificateRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ certName := data.Get("cert_name").(string)
+ if certName == "" {
+ return logical.ErrorResponse("missing cert_name"), nil
+ }
+
+ certificateEntry, err := b.lockedAWSPublicCertificateEntry(req.Storage, certName)
+ if err != nil {
+ return nil, err
+ }
+ if certificateEntry == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: structs.New(certificateEntry).Map(),
+ }, nil
+}
+
+// pathConfigCertificateCreateUpdate is used to register an AWS Public Key that
+// is used to verify the PKCS#7 signature of the instance identity document.
+func (b *backend) pathConfigCertificateCreateUpdate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ certName := data.Get("cert_name").(string)
+ if certName == "" {
+ return logical.ErrorResponse("missing certificate name"), nil
+ }
+
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ // Check if there is already a certificate entry registered
+ certEntry, err := b.nonLockedAWSPublicCertificateEntry(req.Storage, certName)
+ if err != nil {
+ return nil, err
+ }
+ if certEntry == nil {
+ certEntry = &awsPublicCert{}
+ }
+
+ // Check if type information is provided
+ certTypeRaw, ok := data.GetOk("type")
+ if ok {
+ certEntry.Type = strings.ToLower(certTypeRaw.(string))
+ } else if req.Operation == logical.CreateOperation {
+ certEntry.Type = data.Get("type").(string)
+ }
+
+ switch certEntry.Type {
+ case "pkcs7":
+ case "identity":
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("invalid certificate type %q", certEntry.Type)), nil
+ }
+
+ // Check if the value is provided by the client
+ certStrData, ok := data.GetOk("aws_public_cert")
+ if ok {
+ if certBytes, err := base64.StdEncoding.DecodeString(certStrData.(string)); err == nil {
+ certEntry.AWSPublicCert = string(certBytes)
+ } else {
+ certEntry.AWSPublicCert = certStrData.(string)
+ }
+ } else {
+ // aws_public_cert should be supplied for both create and update operations.
+ // If it is not provided, throw an error.
+ return logical.ErrorResponse("missing aws_public_cert"), nil
+ }
+
+ // If explicitly set to empty string, error out
+ if certEntry.AWSPublicCert == "" {
+ return logical.ErrorResponse("invalid aws_public_cert"), nil
+ }
+
+ // Verify the certificate by decoding it and parsing it
+ publicCert, err := decodePEMAndParseCertificate(certEntry.AWSPublicCert)
+ if err != nil {
+ return nil, err
+ }
+ if publicCert == nil {
+ return logical.ErrorResponse("invalid certificate; failed to decode and parse certificate"), nil
+ }
+
+ // If none of the checks fail, save the provided certificate
+ if err := b.nonLockedSetAWSPublicCertificateEntry(req.Storage, certName, certEntry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// Struct awsPublicCert holds the AWS Public Key that is used to verify the PKCS#7 signature
+// of the instance identity document.
+type awsPublicCert struct {
+ AWSPublicCert string `json:"aws_public_cert" structs:"aws_public_cert" mapstructure:"aws_public_cert"`
+ Type string `json:"type" structs:"type" mapstructure:"type"`
+}
+
+const pathConfigCertificateSyn = `
+Adds the AWS Public Key that is used to verify the PKCS#7 signature of the identity document.
+`
+
+const pathConfigCertificateDesc = `
+AWS Public Key which is used to verify the PKCS#7 signature of the identity document,
+varies by region. The public key(s) can be found in AWS EC2 instance metadata documentation.
+The default key that is used to verify the signature is the one that is applicable for
+following regions: US East (N. Virginia), US West (Oregon), US West (N. California),
+EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore),
+Asia Pacific (Sydney), and South America (Sao Paulo).
+
+If the instances belong to a region other than those listed above, the public key(s) for the
+corresponding regions should be registered using this endpoint. PKCS#7 is verified
+using a collection of certificates containing the default certificate and all the
+certificates that are registered using this endpoint.
+`
+const pathListCertificatesHelpSyn = `
+Lists all the AWS public certificates that are registered with the backend.
+`
+const pathListCertificatesHelpDesc = `
+Certificates will be listed by their respective names that were used during registration.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go
new file mode 100644
index 0000000..3787aed
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go
@@ -0,0 +1,265 @@
+package awsauth
+
+import (
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigClient(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/client$",
+ Fields: map[string]*framework.FieldSchema{
+ "access_key": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: "AWS Access Key ID for the account used to make AWS API requests.",
+ },
+
+ "secret_key": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: "AWS Secret Access Key for the account used to make AWS API requests.",
+ },
+
+ "endpoint": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: "URL to override the default generated endpoint for making AWS EC2 API calls.",
+ },
+
+ "iam_endpoint": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: "URL to override the default generated endpoint for making AWS IAM API calls.",
+ },
+
+ "sts_endpoint": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: "URL to override the default generated endpoint for making AWS STS API calls.",
+ },
+
+ "iam_server_id_header_value": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: "Value to require in the X-Vault-AWS-IAM-Server-ID request header",
+ },
+ },
+
+ ExistenceCheck: b.pathConfigClientExistenceCheck,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.CreateOperation: b.pathConfigClientCreateUpdate,
+ logical.UpdateOperation: b.pathConfigClientCreateUpdate,
+ logical.DeleteOperation: b.pathConfigClientDelete,
+ logical.ReadOperation: b.pathConfigClientRead,
+ },
+
+ HelpSynopsis: pathConfigClientHelpSyn,
+ HelpDescription: pathConfigClientHelpDesc,
+ }
+}
+
+// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
+// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
+func (b *backend) pathConfigClientExistenceCheck(
+ req *logical.Request, data *framework.FieldData) (bool, error) {
+
+ entry, err := b.lockedClientConfigEntry(req.Storage)
+ if err != nil {
+ return false, err
+ }
+ return entry != nil, nil
+}
+
+// Fetch the client configuration required to access the AWS API, after acquiring a read lock.
+func (b *backend) lockedClientConfigEntry(s logical.Storage) (*clientConfig, error) {
+ b.configMutex.RLock()
+ defer b.configMutex.RUnlock()
+
+ return b.nonLockedClientConfigEntry(s)
+}
+
+// Fetch the client configuration required to access the AWS API.
+func (b *backend) nonLockedClientConfigEntry(s logical.Storage) (*clientConfig, error) {
+ entry, err := s.Get("config/client")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result clientConfig
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+func (b *backend) pathConfigClientRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ clientConfig, err := b.lockedClientConfigEntry(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ if clientConfig == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: structs.New(clientConfig).Map(),
+ }, nil
+}
+
+func (b *backend) pathConfigClientDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ if err := req.Storage.Delete("config/client"); err != nil {
+ return nil, err
+ }
+
+ // Remove all the cached EC2 client objects in the backend.
+ b.flushCachedEC2Clients()
+
+ // Remove all the cached IAM client objects in the backend.
+ b.flushCachedIAMClients()
+
+ return nil, nil
+}
+
+// pathConfigClientCreateUpdate is used to register the 'aws_secret_key' and 'aws_access_key'
+// that can be used to interact with AWS EC2 API.
+func (b *backend) pathConfigClientCreateUpdate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ configEntry, err := b.nonLockedClientConfigEntry(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if configEntry == nil {
+ configEntry = &clientConfig{}
+ }
+
+ changedCreds := false
+
+ accessKeyStr, ok := data.GetOk("access_key")
+ if ok {
+ if configEntry.AccessKey != accessKeyStr.(string) {
+ changedCreds = true
+ configEntry.AccessKey = accessKeyStr.(string)
+ }
+ } else if req.Operation == logical.CreateOperation {
+ // Use the default
+ configEntry.AccessKey = data.Get("access_key").(string)
+ }
+
+ secretKeyStr, ok := data.GetOk("secret_key")
+ if ok {
+ if configEntry.SecretKey != secretKeyStr.(string) {
+ changedCreds = true
+ configEntry.SecretKey = secretKeyStr.(string)
+ }
+ } else if req.Operation == logical.CreateOperation {
+ configEntry.SecretKey = data.Get("secret_key").(string)
+ }
+
+ endpointStr, ok := data.GetOk("endpoint")
+ if ok {
+ if configEntry.Endpoint != endpointStr.(string) {
+ changedCreds = true
+ configEntry.Endpoint = endpointStr.(string)
+ }
+ } else if req.Operation == logical.CreateOperation {
+ configEntry.Endpoint = data.Get("endpoint").(string)
+ }
+
+ iamEndpointStr, ok := data.GetOk("iam_endpoint")
+ if ok {
+ if configEntry.IAMEndpoint != iamEndpointStr.(string) {
+ changedCreds = true
+ configEntry.IAMEndpoint = iamEndpointStr.(string)
+ }
+ } else if req.Operation == logical.CreateOperation {
+ configEntry.IAMEndpoint = data.Get("iam_endpoint").(string)
+ }
+
+ stsEndpointStr, ok := data.GetOk("sts_endpoint")
+ if ok {
+ if configEntry.STSEndpoint != stsEndpointStr.(string) {
+ // We don't directly cache STS clients as they are never directly used.
+ // However, they are potentially indirectly used as credential providers
+ // for the EC2 and IAM clients, and thus we would be indirectly caching
+ // them there. So, if we change the STS endpoint, we should flush those
+ // cached clients.
+ changedCreds = true
+ configEntry.STSEndpoint = stsEndpointStr.(string)
+ }
+ } else if req.Operation == logical.CreateOperation {
+ configEntry.STSEndpoint = data.Get("sts_endpoint").(string)
+ }
+
+ headerValStr, ok := data.GetOk("iam_server_id_header_value")
+ if ok {
+ if configEntry.IAMServerIdHeaderValue != headerValStr.(string) {
+ // NOT setting changedCreds here, since this isn't really cached
+ configEntry.IAMServerIdHeaderValue = headerValStr.(string)
+ }
+ } else if req.Operation == logical.CreateOperation {
+ configEntry.IAMServerIdHeaderValue = data.Get("iam_server_id_header_value").(string)
+ }
+
+ // Since this endpoint supports both create and update operations,
+ // the error checks for access_key and secret_key not being set are not present.
+ // This allows calling this endpoint multiple times to provide the values.
+ // Hence, the readers of this endpoint should validate the keys
+ // before using them.
+ entry, err := logical.StorageEntryJSON("config/client", configEntry)
+ if err != nil {
+ return nil, err
+ }
+
+ if changedCreds || req.Operation == logical.CreateOperation {
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+ }
+
+ if changedCreds {
+ b.flushCachedEC2Clients()
+ b.flushCachedIAMClients()
+ }
+
+ return nil, nil
+}
+
+// Struct to hold 'aws_access_key' and 'aws_secret_key' that are required to
+// interact with the AWS EC2 API.
+type clientConfig struct {
+ AccessKey string `json:"access_key" structs:"access_key" mapstructure:"access_key"`
+ SecretKey string `json:"secret_key" structs:"secret_key" mapstructure:"secret_key"`
+ Endpoint string `json:"endpoint" structs:"endpoint" mapstructure:"endpoint"`
+ IAMEndpoint string `json:"iam_endpoint" structs:"iam_endpoint" mapstructure:"iam_endpoint"`
+ STSEndpoint string `json:"sts_endpoint" structs:"sts_endpoint" mapstructure:"sts_endpoint"`
+ IAMServerIdHeaderValue string `json:"iam_server_id_header_value" structs:"iam_server_id_header_value" mapstructure:"iam_server_id_header_value"`
+}
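+
+// A hedged sketch of populating this config through the Vault API client;
+// the mount path "auth/aws-ec2" and the credential values are assumptions:
+//
+//	_, err := client.Logical().Write("auth/aws-ec2/config/client", map[string]interface{}{
+//		"access_key": "AKIA...",
+//		"secret_key": "...",
+//	})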
+
+const pathConfigClientHelpSyn = `
+Configure AWS IAM credentials that are used to query instance and role details from the AWS API.
+`
+
+const pathConfigClientHelpDesc = `
+The aws-ec2 auth backend makes AWS API queries to retrieve information
+regarding EC2 instances that perform login operations. The 'aws_secret_key' and
+'aws_access_key' parameters configured here should map to an AWS IAM user that
+has permission to make the following API queries:
+
+* ec2:DescribeInstances
+* iam:GetInstanceProfile (if IAM Role binding is used)
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go
new file mode 100644
index 0000000..2685710
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go
@@ -0,0 +1,76 @@
+package awsauth
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestBackend_pathConfigClient(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // make sure we start with an empty client config, which gives us confidence
+ // that the read later actually reflects the entry we create
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "config/client",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ // at this point, resp == nil is valid as no client config exists
+ // if resp != nil, then resp.Data must have "endpoint" and "iam_server_id_header_value" as nil
+ if resp != nil {
+ if resp.IsError() {
+ t.Fatalf("failed to read client config entry")
+ } else if resp.Data["endpoint"] != nil || resp.Data["iam_server_id_header_value"] != nil {
+ t.Fatalf("returned endpoint or iam_server_id_header_value non-nil")
+ }
+ }
+
+ data := map[string]interface{}{
+ "sts_endpoint": "https://my-custom-sts-endpoint.example.com",
+ "iam_server_id_header_value": "vault_server_identification_314159",
+ }
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "config/client",
+ Data: data,
+ Storage: storage,
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil && resp.IsError() {
+ t.Fatal("failed to create the client config entry")
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "config/client",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatal("failed to read the client config entry")
+ }
+ if resp.Data["iam_server_id_header_value"] != data["iam_server_id_header_value"] {
+ t.Fatalf("expected iam_server_id_header_value: '%#v'; returned iam_server_id_header_value: '%#v'",
+ data["iam_server_id_header_value"], resp.Data["iam_server_id_header_value"])
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_sts.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_sts.go
new file mode 100644
index 0000000..4366feb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_sts.go
@@ -0,0 +1,248 @@
+package awsauth
+
+import (
+ "fmt"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// awsStsEntry is used to store details of an STS role for assumption
+type awsStsEntry struct {
+ StsRole string `json:"sts_role" structs:"sts_role" mapstructure:"sts_role"`
+}
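+
+// A hypothetical registration of an STS role for a satellite account; the
+// mount path, account ID, and role ARN below are illustrative assumptions:
+//
+//	_, err := client.Logical().Write("auth/aws-ec2/config/sts/123456789012", map[string]interface{}{
+//		"sts_role": "arn:aws:iam::123456789012:role/VaultStsAuth",
+//	})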
+
+func pathListSts(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/sts/?",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathStsList,
+ },
+
+ HelpSynopsis: pathListStsHelpSyn,
+ HelpDescription: pathListStsHelpDesc,
+ }
+}
+
+func pathConfigSts(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/sts/" + framework.GenericNameRegex("account_id"),
+ Fields: map[string]*framework.FieldSchema{
+ "account_id": {
+ Type: framework.TypeString,
+ Description: `AWS account ID to be associated with STS role. If set,
+Vault will use assumed credentials to verify any login attempts from EC2
+instances in this account.`,
+ },
+ "sts_role": {
+ Type: framework.TypeString,
+ Description: `AWS ARN for STS role to be assumed when interacting with the account specified.
+The Vault server must have permissions to assume this role.`,
+ },
+ },
+
+ ExistenceCheck: b.pathConfigStsExistenceCheck,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.CreateOperation: b.pathConfigStsCreateUpdate,
+ logical.UpdateOperation: b.pathConfigStsCreateUpdate,
+ logical.ReadOperation: b.pathConfigStsRead,
+ logical.DeleteOperation: b.pathConfigStsDelete,
+ },
+
+ HelpSynopsis: pathConfigStsSyn,
+ HelpDescription: pathConfigStsDesc,
+ }
+}
+
+// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
+// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
+func (b *backend) pathConfigStsExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
+ accountID := data.Get("account_id").(string)
+ if accountID == "" {
+ return false, fmt.Errorf("missing account_id")
+ }
+
+ entry, err := b.lockedAwsStsEntry(req.Storage, accountID)
+ if err != nil {
+ return false, err
+ }
+
+ return entry != nil, nil
+}
+
+// pathStsList is used to list all the AWS STS role configurations
+func (b *backend) pathStsList(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.configMutex.RLock()
+ defer b.configMutex.RUnlock()
+ sts, err := req.Storage.List("config/sts/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(sts), nil
+}
+
+// nonLockedSetAwsStsEntry creates or updates an STS role association with the given accountID
+// This method does not acquire the write lock before creating or updating. If locking is
+// desired, use lockedSetAwsStsEntry instead
+func (b *backend) nonLockedSetAwsStsEntry(s logical.Storage, accountID string, stsEntry *awsStsEntry) error {
+ if accountID == "" {
+ return fmt.Errorf("missing AWS account ID")
+ }
+
+ if stsEntry == nil {
+ return fmt.Errorf("missing AWS STS Role ARN")
+ }
+
+ entry, err := logical.StorageEntryJSON("config/sts/"+accountID, stsEntry)
+ if err != nil {
+ return err
+ }
+
+ if entry == nil {
+ return fmt.Errorf("failed to create storage entry for AWS STS configuration")
+ }
+
+ return s.Put(entry)
+}
+
+// lockedSetAwsStsEntry creates or updates an STS role association with the given accountID
+// This method acquires the write lock before creating or updating the STS entry.
+func (b *backend) lockedSetAwsStsEntry(s logical.Storage, accountID string, stsEntry *awsStsEntry) error {
+ if accountID == "" {
+ return fmt.Errorf("missing AWS account ID")
+ }
+
+ if stsEntry == nil {
+ return fmt.Errorf("missing sts entry")
+ }
+
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ return b.nonLockedSetAwsStsEntry(s, accountID, stsEntry)
+}
+
+// nonLockedAwsStsEntry returns the STS role associated with the given accountID.
+// This method does not acquire the read lock before returning information. If locking is
+// desired, use lockedAwsStsEntry instead
+func (b *backend) nonLockedAwsStsEntry(s logical.Storage, accountID string) (*awsStsEntry, error) {
+ entry, err := s.Get("config/sts/" + accountID)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+ var stsEntry awsStsEntry
+ if err := entry.DecodeJSON(&stsEntry); err != nil {
+ return nil, err
+ }
+
+ return &stsEntry, nil
+}
+
+// lockedAwsStsEntry returns the STS role associated with the given accountID.
+// This method acquires the read lock before returning the association.
+func (b *backend) lockedAwsStsEntry(s logical.Storage, accountID string) (*awsStsEntry, error) {
+ b.configMutex.RLock()
+ defer b.configMutex.RUnlock()
+
+ return b.nonLockedAwsStsEntry(s, accountID)
+}
+
+// pathConfigStsRead is used to return information about an STS role/AWS accountID association
+func (b *backend) pathConfigStsRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ accountID := data.Get("account_id").(string)
+ if accountID == "" {
+ return logical.ErrorResponse("missing account id"), nil
+ }
+
+ stsEntry, err := b.lockedAwsStsEntry(req.Storage, accountID)
+ if err != nil {
+ return nil, err
+ }
+ if stsEntry == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: structs.New(stsEntry).Map(),
+ }, nil
+}
+
+// pathConfigStsCreateUpdate is used to associate an STS role with a given AWS accountID
+func (b *backend) pathConfigStsCreateUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ accountID := data.Get("account_id").(string)
+ if accountID == "" {
+ return logical.ErrorResponse("missing AWS account ID"), nil
+ }
+
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ // Check if an STS role is already registered
+ stsEntry, err := b.nonLockedAwsStsEntry(req.Storage, accountID)
+ if err != nil {
+ return nil, err
+ }
+ if stsEntry == nil {
+ stsEntry = &awsStsEntry{}
+ }
+
+ // Check that an STS role has actually been provided
+ stsRole, ok := data.GetOk("sts_role")
+ if ok {
+ stsEntry.StsRole = stsRole.(string)
+ } else if req.Operation == logical.CreateOperation {
+ return logical.ErrorResponse("missing sts role"), nil
+ }
+
+ if stsEntry.StsRole == "" {
+ return logical.ErrorResponse("sts role cannot be empty"), nil
+ }
+
+ // save the provided STS role
+ if err := b.nonLockedSetAwsStsEntry(req.Storage, accountID, stsEntry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// pathConfigStsDelete is used to delete a previously configured STS configuration
+func (b *backend) pathConfigStsDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ accountID := data.Get("account_id").(string)
+ if accountID == "" {
+ return logical.ErrorResponse("missing account id"), nil
+ }
+
+ return nil, req.Storage.Delete("config/sts/" + accountID)
+}
+
+const pathConfigStsSyn = `
+Specify STS roles to be assumed for certain AWS accounts.
+`
+
+const pathConfigStsDesc = `
+Allows the explicit association of STS roles to satellite AWS accounts (i.e., those
+which are not the account in which the Vault server is running). Login attempts from
+EC2 instances running in these accounts will be verified using credentials obtained
+by assumption of these STS roles.
+
+The environment in which the Vault server resides must have access to assume the
+given STS roles.
+`
+const pathListStsHelpSyn = `
+List all the AWS account/STS role relationships registered with Vault.
+`
+
+const pathListStsHelpDesc = `
+AWS accounts will be listed by account ID, along with their respective role names.
+`
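
For orientation (not part of the diff itself): a minimal sketch of driving the endpoint above with the vendored `github.com/hashicorp/vault/api` client. The mount path `aws-ec2`, the account ID, and the role ARN are illustrative assumptions.

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Associate an STS role with a satellite AWS account (hypothetical
	// account ID and role ARN; backend assumed mounted at "aws-ec2").
	_, err = client.Logical().Write("auth/aws-ec2/config/sts/123456789012",
		map[string]interface{}{
			"sts_role": "arn:aws:iam::123456789012:role/VaultStsRole",
		})
	if err != nil {
		log.Fatal(err)
	}
}
```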
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_tidy_identity_whitelist.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_tidy_identity_whitelist.go
new file mode 100644
index 0000000..43aafaa
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_tidy_identity_whitelist.go
@@ -0,0 +1,149 @@
+package awsauth
+
+import (
+ "fmt"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const (
+ identityWhitelistConfigPath = "config/tidy/identity-whitelist"
+)
+
+func pathConfigTidyIdentityWhitelist(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: fmt.Sprintf("%s$", identityWhitelistConfigPath),
+ Fields: map[string]*framework.FieldSchema{
+ "safety_buffer": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 259200, //72h
+ Description: `The amount of extra time that must have passed beyond the identity's
+expiration, before it is removed from the backend storage.`,
+ },
+ "disable_periodic_tidy": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: "If set to 'true', disables the periodic tidying of the 'identity-whitelist/' entries.",
+ },
+ },
+
+ ExistenceCheck: b.pathConfigTidyIdentityWhitelistExistenceCheck,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.CreateOperation: b.pathConfigTidyIdentityWhitelistCreateUpdate,
+ logical.UpdateOperation: b.pathConfigTidyIdentityWhitelistCreateUpdate,
+ logical.ReadOperation: b.pathConfigTidyIdentityWhitelistRead,
+ logical.DeleteOperation: b.pathConfigTidyIdentityWhitelistDelete,
+ },
+
+ HelpSynopsis: pathConfigTidyIdentityWhitelistHelpSyn,
+ HelpDescription: pathConfigTidyIdentityWhitelistHelpDesc,
+ }
+}
+
+func (b *backend) pathConfigTidyIdentityWhitelistExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
+ entry, err := b.lockedConfigTidyIdentities(req.Storage)
+ if err != nil {
+ return false, err
+ }
+ return entry != nil, nil
+}
+
+func (b *backend) lockedConfigTidyIdentities(s logical.Storage) (*tidyWhitelistIdentityConfig, error) {
+ b.configMutex.RLock()
+ defer b.configMutex.RUnlock()
+
+ return b.nonLockedConfigTidyIdentities(s)
+}
+
+func (b *backend) nonLockedConfigTidyIdentities(s logical.Storage) (*tidyWhitelistIdentityConfig, error) {
+ entry, err := s.Get(identityWhitelistConfigPath)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result tidyWhitelistIdentityConfig
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+func (b *backend) pathConfigTidyIdentityWhitelistCreateUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ configEntry, err := b.nonLockedConfigTidyIdentities(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if configEntry == nil {
+ configEntry = &tidyWhitelistIdentityConfig{}
+ }
+
+ safetyBufferInt, ok := data.GetOk("safety_buffer")
+ if ok {
+ configEntry.SafetyBuffer = safetyBufferInt.(int)
+ } else if req.Operation == logical.CreateOperation {
+ configEntry.SafetyBuffer = data.Get("safety_buffer").(int)
+ }
+
+ disablePeriodicTidyBool, ok := data.GetOk("disable_periodic_tidy")
+ if ok {
+ configEntry.DisablePeriodicTidy = disablePeriodicTidyBool.(bool)
+ } else if req.Operation == logical.CreateOperation {
+ configEntry.DisablePeriodicTidy = data.Get("disable_periodic_tidy").(bool)
+ }
+
+ entry, err := logical.StorageEntryJSON(identityWhitelistConfigPath, configEntry)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathConfigTidyIdentityWhitelistRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ clientConfig, err := b.lockedConfigTidyIdentities(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if clientConfig == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: structs.New(clientConfig).Map(),
+ }, nil
+}
+
+func (b *backend) pathConfigTidyIdentityWhitelistDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ return nil, req.Storage.Delete(identityWhitelistConfigPath)
+}
+
+type tidyWhitelistIdentityConfig struct {
+ SafetyBuffer int `json:"safety_buffer" structs:"safety_buffer" mapstructure:"safety_buffer"`
+ DisablePeriodicTidy bool `json:"disable_periodic_tidy" structs:"disable_periodic_tidy" mapstructure:"disable_periodic_tidy"`
+}
+
+const pathConfigTidyIdentityWhitelistHelpSyn = `
+Configures the periodic tidying operation of the whitelisted identity entries.
+`
+const pathConfigTidyIdentityWhitelistHelpDesc = `
+By default, expired entries in the whitelist are removed periodically. The tidy
+operation looks for expired items in the list and purges them. A safety buffer
+duration (72h by default) is applied on top: an entry is purged only once that
+buffer has elapsed past its expiration time.
+`
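
A hedged usage sketch for this endpoint, reusing the client from the earlier STS example; the values shown are the defaults from the field schema above.

```go
// configureIdentityTidy tunes the identity-whitelist tidy settings.
// A sketch, assuming the backend is mounted at "aws-ec2".
func configureIdentityTidy(client *vault.Client) error {
	_, err := client.Logical().Write("auth/aws-ec2/config/tidy/identity-whitelist",
		map[string]interface{}{
			"safety_buffer":         259200, // 72h, expressed in seconds
			"disable_periodic_tidy": false,
		})
	return err
}
```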
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_tidy_roletag_blacklist.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_tidy_roletag_blacklist.go
new file mode 100644
index 0000000..c3059c6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_tidy_roletag_blacklist.go
@@ -0,0 +1,150 @@
+package awsauth
+
+import (
+ "fmt"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const (
+ roletagBlacklistConfigPath = "config/tidy/roletag-blacklist"
+)
+
+func pathConfigTidyRoletagBlacklist(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: fmt.Sprintf("%s$", roletagBlacklistConfigPath),
+ Fields: map[string]*framework.FieldSchema{
+ "safety_buffer": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 15552000, //180d
+ Description: `The amount of extra time that must have passed beyond the roletag
+expiration, before it is removed from the backend storage.
+Defaults to 4320h (180 days).`,
+ },
+
+ "disable_periodic_tidy": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: "If set to 'true', disables the periodic tidying of blacklisted entries.",
+ },
+ },
+
+ ExistenceCheck: b.pathConfigTidyRoletagBlacklistExistenceCheck,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.CreateOperation: b.pathConfigTidyRoletagBlacklistCreateUpdate,
+ logical.UpdateOperation: b.pathConfigTidyRoletagBlacklistCreateUpdate,
+ logical.ReadOperation: b.pathConfigTidyRoletagBlacklistRead,
+ logical.DeleteOperation: b.pathConfigTidyRoletagBlacklistDelete,
+ },
+
+ HelpSynopsis: pathConfigTidyRoletagBlacklistHelpSyn,
+ HelpDescription: pathConfigTidyRoletagBlacklistHelpDesc,
+ }
+}
+
+func (b *backend) pathConfigTidyRoletagBlacklistExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
+ entry, err := b.lockedConfigTidyRoleTags(req.Storage)
+ if err != nil {
+ return false, err
+ }
+ return entry != nil, nil
+}
+
+func (b *backend) lockedConfigTidyRoleTags(s logical.Storage) (*tidyBlacklistRoleTagConfig, error) {
+ b.configMutex.RLock()
+ defer b.configMutex.RUnlock()
+
+ return b.nonLockedConfigTidyRoleTags(s)
+}
+
+func (b *backend) nonLockedConfigTidyRoleTags(s logical.Storage) (*tidyBlacklistRoleTagConfig, error) {
+ entry, err := s.Get(roletagBlacklistConfigPath)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result tidyBlacklistRoleTagConfig
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathConfigTidyRoletagBlacklistCreateUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ configEntry, err := b.nonLockedConfigTidyRoleTags(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if configEntry == nil {
+ configEntry = &tidyBlacklistRoleTagConfig{}
+ }
+ safetyBufferInt, ok := data.GetOk("safety_buffer")
+ if ok {
+ configEntry.SafetyBuffer = safetyBufferInt.(int)
+ } else if req.Operation == logical.CreateOperation {
+ configEntry.SafetyBuffer = data.Get("safety_buffer").(int)
+ }
+ disablePeriodicTidyBool, ok := data.GetOk("disable_periodic_tidy")
+ if ok {
+ configEntry.DisablePeriodicTidy = disablePeriodicTidyBool.(bool)
+ } else if req.Operation == logical.CreateOperation {
+ configEntry.DisablePeriodicTidy = data.Get("disable_periodic_tidy").(bool)
+ }
+
+ entry, err := logical.StorageEntryJSON(roletagBlacklistConfigPath, configEntry)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathConfigTidyRoletagBlacklistRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ clientConfig, err := b.lockedConfigTidyRoleTags(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if clientConfig == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: structs.New(clientConfig).Map(),
+ }, nil
+}
+
+func (b *backend) pathConfigTidyRoletagBlacklistDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.configMutex.Lock()
+ defer b.configMutex.Unlock()
+
+ return nil, req.Storage.Delete(roletagBlacklistConfigPath)
+}
+
+type tidyBlacklistRoleTagConfig struct {
+ SafetyBuffer int `json:"safety_buffer" structs:"safety_buffer" mapstructure:"safety_buffer"`
+ DisablePeriodicTidy bool `json:"disable_periodic_tidy" structs:"disable_periodic_tidy" mapstructure:"disable_periodic_tidy"`
+}
+
+const pathConfigTidyRoletagBlacklistHelpSyn = `
+Configures the periodic tidying operation of the blacklisted role tag entries.
+`
+const pathConfigTidyRoletagBlacklistHelpDesc = `
+By default, expired entries in the blacklist are removed periodically. The tidy
+operation looks for expired items in the list and purges them. A safety buffer
+duration (4320h, i.e. 180 days, by default) is applied on top: an entry is
+purged only once that buffer has elapsed past its expiration time.
+`
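
And the read side, again as an illustrative sketch against the same assumed client and mount: the read handler above returns the config struct's fields via `structs.New(...).Map()`.

```go
// readRoletagTidy reads back the role-tag blacklist tidy settings.
func readRoletagTidy(client *vault.Client) (map[string]interface{}, error) {
	secret, err := client.Logical().Read("auth/aws-ec2/config/tidy/roletag-blacklist")
	if err != nil || secret == nil {
		return nil, err // nil secret means no config has been written yet
	}
	return secret.Data, nil // keys: safety_buffer, disable_periodic_tidy
}
```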
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_identity_whitelist.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_identity_whitelist.go
new file mode 100644
index 0000000..600fc7d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_identity_whitelist.go
@@ -0,0 +1,159 @@
+package awsauth
+
+import (
+ "time"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathIdentityWhitelist(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "identity-whitelist/" + framework.GenericNameRegex("instance_id"),
+ Fields: map[string]*framework.FieldSchema{
+ "instance_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `EC2 instance ID. A successful login operation from an EC2 instance
+gets cached in this whitelist, keyed off of instance ID.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathIdentityWhitelistRead,
+ logical.DeleteOperation: b.pathIdentityWhitelistDelete,
+ },
+
+ HelpSynopsis: pathIdentityWhitelistSyn,
+ HelpDescription: pathIdentityWhitelistDesc,
+ }
+}
+
+func pathListIdentityWhitelist(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "identity-whitelist/?",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathWhitelistIdentitiesList,
+ },
+
+ HelpSynopsis: pathListIdentityWhitelistHelpSyn,
+ HelpDescription: pathListIdentityWhitelistHelpDesc,
+ }
+}
+
+// pathWhitelistIdentitiesList is used to list all the instance IDs that are present
+// in the identity whitelist. This will list both valid and expired entries.
+func (b *backend) pathWhitelistIdentitiesList(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ identities, err := req.Storage.List("whitelist/identity/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(identities), nil
+}
+
+// Fetch an item from the whitelist given an instance ID.
+func whitelistIdentityEntry(s logical.Storage, instanceID string) (*whitelistIdentity, error) {
+ entry, err := s.Get("whitelist/identity/" + instanceID)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result whitelistIdentity
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+// Stores an instance ID and the information required to validate further login/renewal attempts from
+// the same instance ID.
+func setWhitelistIdentityEntry(s logical.Storage, instanceID string, identity *whitelistIdentity) error {
+ entry, err := logical.StorageEntryJSON("whitelist/identity/"+instanceID, identity)
+ if err != nil {
+ return err
+ }
+
+ if err := s.Put(entry); err != nil {
+ return err
+ }
+ return nil
+}
+
+// pathIdentityWhitelistDelete is used to delete an entry from the identity whitelist given an instance ID.
+func (b *backend) pathIdentityWhitelistDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ instanceID := data.Get("instance_id").(string)
+ if instanceID == "" {
+ return logical.ErrorResponse("missing instance_id"), nil
+ }
+
+ return nil, req.Storage.Delete("whitelist/identity/" + instanceID)
+}
+
+// pathIdentityWhitelistRead is used to view an entry in the identity whitelist given an instance ID.
+func (b *backend) pathIdentityWhitelistRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ instanceID := data.Get("instance_id").(string)
+ if instanceID == "" {
+ return logical.ErrorResponse("missing instance_id"), nil
+ }
+
+ entry, err := whitelistIdentityEntry(req.Storage, instanceID)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ resp := &logical.Response{
+ Data: structs.New(entry).Map(),
+ }
+ resp.Data["creation_time"] = entry.CreationTime.Format(time.RFC3339Nano)
+ resp.Data["expiration_time"] = entry.ExpirationTime.Format(time.RFC3339Nano)
+ resp.Data["last_updated_time"] = entry.LastUpdatedTime.Format(time.RFC3339Nano)
+
+ return resp, nil
+}
+
+// Struct to represent each item in the identity whitelist.
+type whitelistIdentity struct {
+ Role string `json:"role" structs:"role" mapstructure:"role"`
+ ClientNonce string `json:"client_nonce" structs:"client_nonce" mapstructure:"client_nonce"`
+ CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"`
+ DisallowReauthentication bool `json:"disallow_reauthentication" structs:"disallow_reauthentication" mapstructure:"disallow_reauthentication"`
+ PendingTime string `json:"pending_time" structs:"pending_time" mapstructure:"pending_time"`
+ ExpirationTime time.Time `json:"expiration_time" structs:"expiration_time" mapstructure:"expiration_time"`
+ LastUpdatedTime time.Time `json:"last_updated_time" structs:"last_updated_time" mapstructure:"last_updated_time"`
+}
+
+const pathIdentityWhitelistSyn = `
+Read or delete entries in the identity whitelist.
+`
+
+const pathIdentityWhitelistDesc = `
+Each login from an EC2 instance creates/updates an entry in the identity whitelist.
+
+Entries in this list can be viewed or deleted using this endpoint.
+
+By default, a periodic task looks for expired entries in the whitelist and
+deletes them; it runs once an hour by default. This behavior can be configured
+using the 'config/tidy/identity-whitelist' endpoint, and the tidy action can
+also be triggered via the API using the 'tidy/identity-whitelist' endpoint.
+`
+
+const pathListIdentityWhitelistHelpSyn = `
+Lists the items present in the identity whitelist.
+`
+
+const pathListIdentityWhitelistHelpDesc = `
+Entries in the identity whitelist are keyed off of the EC2 instance IDs.
+This endpoint lists all the entries present in the identity whitelist, both
+expired and unexpired. Use the 'tidy/identity-whitelist' endpoint to clean up
+the whitelist of identities.
+`
+`
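
A hedged sketch of list/delete against these endpoints (same assumed client and mount as above); deleting an entry forces the instance to re-register, and obtain a fresh nonce, on its next login.

```go
// evictInstance lists the whitelisted instance IDs and removes one entry.
// Assumes a configured *vault.Client and the "log" import.
func evictInstance(client *vault.Client, instanceID string) error {
	secret, err := client.Logical().List("auth/aws-ec2/identity-whitelist")
	if err != nil {
		return err
	}
	if secret != nil {
		log.Printf("whitelisted instances: %v", secret.Data["keys"])
	}
	_, err = client.Logical().Delete("auth/aws-ec2/identity-whitelist/" + instanceID)
	return err
}
```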
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go
new file mode 100644
index 0000000..bf50898
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go
@@ -0,0 +1,1477 @@
+package awsauth
+
+import (
+ "crypto/subtle"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "encoding/xml"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/aws/aws-sdk-go/service/iam"
+ "github.com/fullsailor/pkcs7"
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const (
+ reauthenticationDisabledNonce = "reauthentication-disabled-nonce"
+ iamAuthType = "iam"
+ ec2AuthType = "ec2"
+ ec2EntityType = "ec2_instance"
+)
+
+func pathLogin(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "login$",
+ Fields: map[string]*framework.FieldSchema{
+ "role": {
+ Type: framework.TypeString,
+ Description: `Name of the role against which the login is being attempted.
+If 'role' is not specified, then the login endpoint looks for a role
+bearing the name of the AMI ID of the EC2 instance that is trying to log in.
+If a matching role is not found, login fails.`,
+ },
+
+ "pkcs7": {
+ Type: framework.TypeString,
+ Description: `PKCS7 signature of the identity document when using an auth_type
+of ec2.`,
+ },
+
+ "nonce": {
+ Type: framework.TypeString,
+ Description: `The nonce to be used for subsequent login requests when
+auth_type is ec2. If this parameter is not specified at
+all and reauthentication is allowed, the backend will generate a random
+nonce, attach it to the instance's identity-whitelist entry and return the
+nonce as part of the auth metadata. This value should be used with further
+login requests to establish client authenticity. Clients can choose to set a
+custom nonce if preferred, in which case it is recommended that clients provide
+a strong nonce. If a nonce is provided but with an empty value, it indicates
+intent to disable reauthentication. Note that when the 'disallow_reauthentication'
+option is enabled on either the role or the role tag, the 'nonce' holds no
+significance.`,
+ },
+
+ "iam_http_request_method": {
+ Type: framework.TypeString,
+ Description: `HTTP method to use for the AWS request when auth_type is
+iam. This must match what has been signed in the
+presigned request. Currently, POST is the only supported value.`,
+ },
+
+ "iam_request_url": {
+ Type: framework.TypeString,
+ Description: `Base64-encoded full URL against which to make the AWS request
+when using iam auth_type.`,
+ },
+
+ "iam_request_body": {
+ Type: framework.TypeString,
+ Description: `Base64-encoded request body when auth_type is iam.
+This must match the request body included in the signature.`,
+ },
+ "iam_request_headers": {
+ Type: framework.TypeString,
+ Description: `Base64-encoded JSON representation of the request headers when auth_type is
+iam. This must at a minimum include the headers over
+which AWS has included a signature.`,
+ },
+ "identity": {
+ Type: framework.TypeString,
+ Description: `Base64 encoded EC2 instance identity document. This needs to be supplied along
+with the 'signature' parameter. If using 'curl' to fetch the identity
+document, consider passing the '-w 0' option to the 'base64' binary when
+piping the output through it.`,
+ },
+ "signature": {
+ Type: framework.TypeString,
+ Description: `Base64 encoded SHA256 RSA signature of the instance identity document. This
+needs to be supplied along with 'identity' parameter.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathLoginUpdate,
+ },
+
+ HelpSynopsis: pathLoginSyn,
+ HelpDescription: pathLoginDesc,
+ }
+}
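
The iam_* fields above carry a presigned sts:GetCallerIdentity request. A hedged sketch of how a client might assemble that payload with the vendored aws-sdk-go; the header value "vault.example.com" stands in for a configured iam_server_id_header_value and is an assumption, as is the role name passed in.

```go
import (
	"encoding/base64"
	"encoding/json"
	"io/ioutil"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

// buildIamLoginData signs an sts:GetCallerIdentity request and encodes it
// the way the login endpoint above expects.
func buildIamLoginData(role string) (map[string]interface{}, error) {
	svc := sts.New(session.Must(session.NewSession()))
	req, _ := svc.GetCallerIdentityRequest(nil)
	// Hypothetical server ID header; only needed if the backend config
	// sets iam_server_id_header_value.
	req.HTTPRequest.Header.Add("X-Vault-AWS-IAM-Server-ID", "vault.example.com")
	if err := req.Sign(); err != nil {
		return nil, err
	}

	headers, err := json.Marshal(req.HTTPRequest.Header)
	if err != nil {
		return nil, err
	}
	body, err := ioutil.ReadAll(req.HTTPRequest.Body)
	if err != nil {
		return nil, err
	}

	return map[string]interface{}{
		"role":                    role,
		"iam_http_request_method": req.HTTPRequest.Method,
		"iam_request_url":         base64.StdEncoding.EncodeToString([]byte(req.HTTPRequest.URL.String())),
		"iam_request_headers":     base64.StdEncoding.EncodeToString(headers),
		"iam_request_body":        base64.StdEncoding.EncodeToString(body),
	}, nil
}
```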
+
+// instanceIamRoleARN fetches the IAM role ARN associated with the given
+// instance profile name
+func (b *backend) instanceIamRoleARN(iamClient *iam.IAM, instanceProfileName string) (string, error) {
+ if iamClient == nil {
+ return "", fmt.Errorf("nil iamClient")
+ }
+ if instanceProfileName == "" {
+ return "", fmt.Errorf("missing instance profile name")
+ }
+
+ profile, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{
+ InstanceProfileName: aws.String(instanceProfileName),
+ })
+ if err != nil {
+ return "", err
+ }
+ if profile == nil {
+ return "", fmt.Errorf("nil output while getting instance profile details")
+ }
+
+ if profile.InstanceProfile == nil {
+ return "", fmt.Errorf("nil instance profile in the output of instance profile details")
+ }
+
+ if profile.InstanceProfile.Roles == nil || len(profile.InstanceProfile.Roles) != 1 {
+ return "", fmt.Errorf("invalid roles in the output of instance profile details")
+ }
+
+ if profile.InstanceProfile.Roles[0].Arn == nil {
+ return "", fmt.Errorf("nil role ARN in the output of instance profile details")
+ }
+
+ return *profile.InstanceProfile.Roles[0].Arn, nil
+}
+
+// validateInstance queries the status of the EC2 instance using AWS EC2 API
+// and checks if the instance is running and is healthy
+func (b *backend) validateInstance(s logical.Storage, instanceID, region, accountID string) (*ec2.Instance, error) {
+
+ // Check if an STS configuration exists for the AWS account
+ sts, err := b.lockedAwsStsEntry(s, accountID)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching STS config for account ID %q: %q\n", accountID, err)
+ }
+ // An empty STS role signifies the master account
+ stsRole := ""
+ if sts != nil {
+ stsRole = sts.StsRole
+ }
+
+ // Create an EC2 client to pull the instance information
+ ec2Client, err := b.clientEC2(s, region, stsRole)
+ if err != nil {
+ return nil, err
+ }
+
+ status, err := ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{
+ Filters: []*ec2.Filter{
+ &ec2.Filter{
+ Name: aws.String("instance-id"),
+ Values: []*string{
+ aws.String(instanceID),
+ },
+ },
+ },
+ })
+ if err != nil {
+ return nil, fmt.Errorf("error fetching description for instance ID %q: %q\n", instanceID, err)
+ }
+ if status == nil {
+ return nil, fmt.Errorf("nil output from describe instances")
+ }
+ if len(status.Reservations) == 0 {
+ return nil, fmt.Errorf("no reservations found in instance description")
+
+ }
+ if len(status.Reservations[0].Instances) == 0 {
+ return nil, fmt.Errorf("no instance details found in reservations")
+ }
+ if *status.Reservations[0].Instances[0].InstanceId != instanceID {
+ return nil, fmt.Errorf("expected instance ID not matching the instance ID in the instance description")
+ }
+ if status.Reservations[0].Instances[0].State == nil {
+ return nil, fmt.Errorf("instance state in instance description is nil")
+ }
+ if *status.Reservations[0].Instances[0].State.Name != "running" {
+ return nil, fmt.Errorf("instance is not in 'running' state")
+ }
+ return status.Reservations[0].Instances[0], nil
+}
+
+// validateMetadata matches the given client nonce and pending time against
+// those cached in the identity whitelist during the previous login. If
+// reauthentication is disabled, the login attempt fails immediately.
+func validateMetadata(clientNonce, pendingTime string, storedIdentity *whitelistIdentity, roleEntry *awsRoleEntry) error {
+ // For sanity
+ if !storedIdentity.DisallowReauthentication && storedIdentity.ClientNonce == "" {
+ return fmt.Errorf("client nonce missing in stored identity")
+ }
+
+ // If reauthentication is disabled, or if the supplied nonce matches the
+ // predefined nonce which indicates that reauthentication is disabled,
+ // authentication will not succeed.
+ if storedIdentity.DisallowReauthentication ||
+ subtle.ConstantTimeCompare([]byte(reauthenticationDisabledNonce), []byte(clientNonce)) == 1 {
+ return fmt.Errorf("reauthentication is disabled")
+ }
+
+ givenPendingTime, err := time.Parse(time.RFC3339, pendingTime)
+ if err != nil {
+ return err
+ }
+
+ storedPendingTime, err := time.Parse(time.RFC3339, storedIdentity.PendingTime)
+ if err != nil {
+ return err
+ }
+
+ // When the presented client nonce does not match the cached entry, it
+ // is either that a rogue client is trying to login or that a valid
+ // client suffered a migration. The migration is detected via
+ // pendingTime in the instance metadata, which sadly is only updated
+ // when an instance is stopped and started but *not* when the instance
+ // is rebooted. If reboot survivability is needed, either
+ // instrumentation to delete the instance ID from the whitelist is
+ // necessary, or the client must durably store the nonce.
+ //
+ // If the `allow_instance_migration` property of the registered role is
+ // enabled, then the client nonce mismatch is ignored, as long as the
+ // pending time in the presented instance identity document is newer
+ // than the cached pending time. The new pendingTime is stored and used
+ // for future checks.
+ //
+ // This is a weak criterion and hence the `allow_instance_migration`
+ // option should be used with caution.
+ if subtle.ConstantTimeCompare([]byte(clientNonce), []byte(storedIdentity.ClientNonce)) != 1 {
+ if !roleEntry.AllowInstanceMigration {
+ return fmt.Errorf("client nonce mismatch")
+ }
+ if roleEntry.AllowInstanceMigration && !givenPendingTime.After(storedPendingTime) {
+ return fmt.Errorf("client nonce mismatch and instance meta-data incorrect")
+ }
+ }
+
+ // Ensure that the 'pendingTime' on the given identity document is not
+ // before the 'pendingTime' that was used for previous login. This
+ // disallows old metadata documents from being used to perform login.
+ if givenPendingTime.Before(storedPendingTime) {
+ return fmt.Errorf("instance meta-data is older than the one used for previous login")
+ }
+ return nil
+}
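
A minimal in-package test sketch (hypothetical; assumes the awsauth package and a `testing` import) pinning down the nonce-mismatch branch above. The field values are illustrative.

```go
func TestValidateMetadata_NonceMismatch(t *testing.T) {
	stored := &whitelistIdentity{
		ClientNonce: "original-nonce",
		PendingTime: "2017-01-01T00:00:00Z",
	}
	role := &awsRoleEntry{AllowInstanceMigration: false}

	// Same pending time but a different nonce: must be rejected when
	// instance migration is not allowed on the role.
	if err := validateMetadata("other-nonce", "2017-01-01T00:00:00Z", stored, role); err == nil {
		t.Fatal("expected a client nonce mismatch error")
	}
}
```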
+
+// Verifies the integrity of the instance identity document using its SHA256
+// RSA signature. After verification, returns the unmarshaled instance identity
+// document.
+func (b *backend) verifyInstanceIdentitySignature(s logical.Storage, identityBytes, signatureBytes []byte) (*identityDocument, error) {
+ if len(identityBytes) == 0 {
+ return nil, fmt.Errorf("missing instance identity document")
+ }
+
+ if len(signatureBytes) == 0 {
+ return nil, fmt.Errorf("missing SHA256 RSA signature of the instance identity document")
+ }
+
+ // Get the public certificates that are used to verify the signature.
+ // This returns a slice of certificates containing the default
+ // certificate and all the registered certificates via
+ // 'config/certificate/' endpoint, for verifying the RSA
+ // digest.
+ publicCerts, err := b.awsPublicCertificates(s, false)
+ if err != nil {
+ return nil, err
+ }
+ if publicCerts == nil || len(publicCerts) == 0 {
+ return nil, fmt.Errorf("certificates to verify the signature are not found")
+ }
+
+ // Check if any of the certs registered at the backend can verify the
+ // signature
+ for _, cert := range publicCerts {
+ err := cert.CheckSignature(x509.SHA256WithRSA, identityBytes, signatureBytes)
+ if err == nil {
+ var identityDoc identityDocument
+ if decErr := jsonutil.DecodeJSON(identityBytes, &identityDoc); decErr != nil {
+ return nil, decErr
+ }
+ return &identityDoc, nil
+ }
+ }
+
+ return nil, fmt.Errorf("instance identity verification using SHA256 RSA signature is unsuccessful")
+}
+
+// Verifies the correctness of the authenticated attributes present in the PKCS#7
+// signature. After verification, extracts the instance identity document from the
+// signature, parses it and returns it.
+func (b *backend) parseIdentityDocument(s logical.Storage, pkcs7B64 string) (*identityDocument, error) {
+ // Insert the header and footer for the signature to be able to pem decode it
+ pkcs7B64 = fmt.Sprintf("-----BEGIN PKCS7-----\n%s\n-----END PKCS7-----", pkcs7B64)
+
+ // Decode the PEM encoded signature
+ pkcs7BER, pkcs7Rest := pem.Decode([]byte(pkcs7B64))
+ if len(pkcs7Rest) != 0 {
+ return nil, fmt.Errorf("failed to decode the PEM encoded PKCS#7 signature")
+ }
+
+ // Parse the signature from asn1 format into a struct
+ pkcs7Data, err := pkcs7.Parse(pkcs7BER.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse the BER encoded PKCS#7 signature: %v\n", err)
+ }
+
+ // Get the public certificates that are used to verify the signature.
+ // This returns a slice of certificates containing the default certificate
+ // and all the registered certificates via 'config/certificate/' endpoint
+ publicCerts, err := b.awsPublicCertificates(s, true)
+ if err != nil {
+ return nil, err
+ }
+ if publicCerts == nil || len(publicCerts) == 0 {
+ return nil, fmt.Errorf("certificates to verify the signature are not found")
+ }
+
+ // Before calling Verify() on the PKCS#7 struct, set the certificates to be used
+ // to verify the contents in the signer information.
+ pkcs7Data.Certificates = publicCerts
+
+ // Verify extracts the authenticated attributes in the PKCS#7 signature, and verifies
+ // the authenticity of the content using 'dsa.PublicKey' embedded in the public certificate.
+ if pkcs7Data.Verify() != nil {
+ return nil, fmt.Errorf("failed to verify the signature")
+ }
+
+ // Check if the signature has content inside of it
+ if len(pkcs7Data.Content) == 0 {
+ return nil, fmt.Errorf("instance identity document could not be found in the signature")
+ }
+
+ var identityDoc identityDocument
+ if err := jsonutil.DecodeJSON(pkcs7Data.Content, &identityDoc); err != nil {
+ return nil, err
+ }
+
+ return &identityDoc, nil
+}
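
On the client side, the PKCS#7 document consumed above is typically fetched from the EC2 metadata service. A hedged sketch (assumed imports: net/http, io/ioutil, strings, and the vault api client as before):

```go
// ec2Login fetches the instance's PKCS#7 identity signature and trades it
// for a Vault token. Newlines are stripped before submission; the login
// endpoint re-wraps the payload in PEM headers itself.
func ec2Login(client *vault.Client, role string) (*vault.Secret, error) {
	resp, err := http.Get("http://169.254.169.254/latest/dynamic/instance-identity/pkcs7")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	raw, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	return client.Logical().Write("auth/aws-ec2/login", map[string]interface{}{
		"role":  role,
		"pkcs7": strings.Replace(string(raw), "\n", "", -1),
	})
}
```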
+
+func (b *backend) pathLoginUpdate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ anyEc2, allEc2 := hasValuesForEc2Auth(data)
+ anyIam, allIam := hasValuesForIamAuth(data)
+ switch {
+ case anyEc2 && anyIam:
+ return logical.ErrorResponse("supplied auth values for both ec2 and iam auth types"), nil
+ case anyEc2 && !allEc2:
+ return logical.ErrorResponse("supplied some of the auth values for the ec2 auth type but not all"), nil
+ case anyEc2:
+ return b.pathLoginUpdateEc2(req, data)
+ case anyIam && !allIam:
+ return logical.ErrorResponse("supplied some of the auth values for the iam auth type but not all"), nil
+ case anyIam:
+ return b.pathLoginUpdateIam(req, data)
+ default:
+ return logical.ErrorResponse("didn't supply required authentication values"), nil
+ }
+}
+
+// Returns whether the EC2 instance meets the requirements of the particular
+// AWS role entry.
+// The first error return value reports a validation failure, i.e. the
+// instance does not meet the role requirements; the second reports an error
+// in even trying to validate those requirements.
+func (b *backend) verifyInstanceMeetsRoleRequirements(
+ s logical.Storage, instance *ec2.Instance, roleEntry *awsRoleEntry, roleName string, identityDoc *identityDocument) (error, error) {
+
+ switch {
+ case instance == nil:
+ return nil, fmt.Errorf("nil instance")
+ case roleEntry == nil:
+ return nil, fmt.Errorf("nil roleEntry")
+ case identityDoc == nil:
+ return nil, fmt.Errorf("nil identityDoc")
+ }
+
+ // Verify that the AccountID of the instance trying to log in matches the
+ // AccountID specified as a constraint on the role
+ if roleEntry.BoundAccountID != "" && identityDoc.AccountID != roleEntry.BoundAccountID {
+ return fmt.Errorf("account ID %q does not belong to role %q", identityDoc.AccountID, roleName), nil
+ }
+
+ // Verify that the AMI ID of the instance trying to log in matches the
+ // AMI ID specified as a constraint on the role.
+ //
+ // Here, we're making a tradeoff and pulling the AMI ID out of the EC2
+ // API rather than the signed instance identity doc. They *should* match.
+ // This means we require an EC2 API call to retrieve the AMI ID, but we're
+ // already calling the API to validate the Instance ID anyway, so it shouldn't
+ // matter. The benefit is that we have the exact same code whether auth_type
+ // is ec2 or iam.
+ if roleEntry.BoundAmiID != "" {
+ if instance.ImageId == nil {
+ return nil, fmt.Errorf("AMI ID in the instance description is nil")
+ }
+ if roleEntry.BoundAmiID != *instance.ImageId {
+ return fmt.Errorf("AMI ID %q does not belong to role %q", instance.ImageId, roleName), nil
+ }
+ }
+
+ // Validate the SubnetID if corresponding bound was set on the role
+ if roleEntry.BoundSubnetID != "" {
+ if instance.SubnetId == nil {
+ return nil, fmt.Errorf("subnet ID in the instance description is nil")
+ }
+ if roleEntry.BoundSubnetID != *instance.SubnetId {
+ return fmt.Errorf("subnet ID %q does not satisfy the constraint on role %q", *instance.SubnetId, roleName), nil
+ }
+ }
+
+ // Validate the VpcID if corresponding bound was set on the role
+ if roleEntry.BoundVpcID != "" {
+ if instance.VpcId == nil {
+ return nil, fmt.Errorf("VPC ID in the instance description is nil")
+ }
+ if roleEntry.BoundVpcID != *instance.VpcId {
+ return fmt.Errorf("VPC ID %q does not satisfy the constraint on role %q", *instance.VpcId, roleName), nil
+ }
+ }
+
+ // Check if the IAM instance profile ARN of the instance trying to
+ // log in matches the IAM instance profile ARN specified as a constraint
+ // on the role
+ if roleEntry.BoundIamInstanceProfileARN != "" {
+ if instance.IamInstanceProfile == nil {
+ return nil, fmt.Errorf("IAM instance profile in the instance description is nil")
+ }
+ if instance.IamInstanceProfile.Arn == nil {
+ return nil, fmt.Errorf("IAM instance profile ARN in the instance description is nil")
+ }
+ iamInstanceProfileARN := *instance.IamInstanceProfile.Arn
+ if !strings.HasPrefix(iamInstanceProfileARN, roleEntry.BoundIamInstanceProfileARN) {
+ return fmt.Errorf("IAM instance profile ARN %q does not satisfy the constraint role %q", iamInstanceProfileARN, roleName), nil
+ }
+ }
+
+ // Check if the IAM role ARN of the instance trying to log in matches
+ // the IAM role ARN specified as a constraint on the role.
+ if roleEntry.BoundIamRoleARN != "" {
+ if instance.IamInstanceProfile == nil {
+ return nil, fmt.Errorf("IAM instance profile in the instance description is nil")
+ }
+ if instance.IamInstanceProfile.Arn == nil {
+ return nil, fmt.Errorf("IAM instance profile ARN in the instance description is nil")
+ }
+
+ // Fetch the instance profile ARN from the instance description
+ iamInstanceProfileARN := *instance.IamInstanceProfile.Arn
+
+ if iamInstanceProfileARN == "" {
+ return nil, fmt.Errorf("IAM instance profile ARN in the instance description is empty")
+ }
+
+ // Extract the instance profile name from the instance
+ // profile ARN
+ iamInstanceProfileARNSlice := strings.SplitAfter(iamInstanceProfileARN, ":instance-profile/")
+ iamInstanceProfileName := iamInstanceProfileARNSlice[len(iamInstanceProfileARNSlice)-1]
+
+ if iamInstanceProfileName == "" {
+ return nil, fmt.Errorf("failed to extract out IAM instance profile name from IAM instance profile ARN")
+ }
+
+ // Check if an STS configuration exists for the AWS account
+ sts, err := b.lockedAwsStsEntry(s, identityDoc.AccountID)
+ if err != nil {
+ return fmt.Errorf("error fetching STS config for account ID %q: %q\n", identityDoc.AccountID, err), nil
+ }
+ // An empty STS role signifies the master account
+ stsRole := ""
+ if sts != nil {
+ stsRole = sts.StsRole
+ }
+
+ // Use instance profile ARN to fetch the associated role ARN
+ iamClient, err := b.clientIAM(s, identityDoc.Region, stsRole)
+ if err != nil {
+ return nil, fmt.Errorf("could not fetch IAM client: %v", err)
+ } else if iamClient == nil {
+ return nil, fmt.Errorf("received a nil iamClient")
+ }
+ iamRoleARN, err := b.instanceIamRoleARN(iamClient, iamInstanceProfileName)
+ if err != nil {
+ return nil, fmt.Errorf("IAM role ARN could not be fetched: %v", err)
+ }
+ if iamRoleARN == "" {
+ return nil, fmt.Errorf("IAM role ARN could not be fetched")
+ }
+
+ if !strings.HasPrefix(iamRoleARN, roleEntry.BoundIamRoleARN) {
+ return fmt.Errorf("IAM role ARN %q does not satisfy the constraint role %q", iamRoleARN, roleName), nil
+ }
+ }
+
+ return nil, nil
+}
+
+// pathLoginUpdateEc2 is used by EC2 instances to obtain a Vault token by
+// providing the pkcs7 signature of the instance identity document and a
+// client-created nonce. The client nonce is optional if the
+// 'disallow_reauthentication' option is enabled on the registered role.
+func (b *backend) pathLoginUpdateEc2(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ identityDocB64 := data.Get("identity").(string)
+ var identityDocBytes []byte
+ var err error
+ if identityDocB64 != "" {
+ identityDocBytes, err = base64.StdEncoding.DecodeString(identityDocB64)
+ if err != nil || len(identityDocBytes) == 0 {
+ return logical.ErrorResponse("failed to base64 decode the instance identity document"), nil
+ }
+ }
+
+ signatureB64 := data.Get("signature").(string)
+ var signatureBytes []byte
+ if signatureB64 != "" {
+ signatureBytes, err = base64.StdEncoding.DecodeString(signatureB64)
+ if err != nil {
+ return logical.ErrorResponse("failed to base64 decode the SHA256 RSA signature of the instance identity document"), nil
+ }
+ }
+
+ pkcs7B64 := data.Get("pkcs7").(string)
+
+ // Either the pkcs7 signature of the instance identity document, or
+ // the identity document itself along with its SHA256 RSA signature
+ // needs to be provided.
+ if pkcs7B64 == "" && (len(identityDocBytes) == 0 && len(signatureBytes) == 0) {
+ return logical.ErrorResponse("either pkcs7 or a tuple containing the instance identity document and its SHA256 RSA signature needs to be provided"), nil
+ } else if pkcs7B64 != "" && (len(identityDocBytes) != 0 && len(signatureBytes) != 0) {
+ return logical.ErrorResponse("both pkcs7 and a tuple containing the instance identity document and its SHA256 RSA signature is supplied; provide only one"), nil
+ }
+
+ // Verify the signature of the identity document and unmarshal it
+ var identityDocParsed *identityDocument
+ if pkcs7B64 != "" {
+ identityDocParsed, err = b.parseIdentityDocument(req.Storage, pkcs7B64)
+ if err != nil {
+ return nil, err
+ }
+ if identityDocParsed == nil {
+ return logical.ErrorResponse("failed to verify the instance identity document using pkcs7"), nil
+ }
+ } else {
+ identityDocParsed, err = b.verifyInstanceIdentitySignature(req.Storage, identityDocBytes, signatureBytes)
+ if err != nil {
+ return nil, err
+ }
+ if identityDocParsed == nil {
+ return logical.ErrorResponse("failed to verify the instance identity document using the SHA256 RSA digest"), nil
+ }
+ }
+
+ roleName := data.Get("role").(string)
+
+ // If roleName is not supplied, a role bearing the name of the instance's AMI ID is looked up
+ if roleName == "" {
+ roleName = identityDocParsed.AmiID
+ }
+
+ // Get the entry for the role used by the instance
+ roleEntry, err := b.lockedAWSRole(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if roleEntry == nil {
+ return logical.ErrorResponse(fmt.Sprintf("entry for role %q not found", roleName)), nil
+ }
+
+ if roleEntry.AuthType != ec2AuthType {
+ return logical.ErrorResponse(fmt.Sprintf("auth method ec2 not allowed for role %s", roleName)), nil
+ }
+
+ // Validate the instance ID by making a call to AWS EC2 DescribeInstances API
+ // and fetching the instance description. Validation succeeds only if the
+ // instance is in 'running' state.
+ instance, err := b.validateInstance(req.Storage, identityDocParsed.InstanceID, identityDocParsed.Region, identityDocParsed.AccountID)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("failed to verify instance ID: %v", err)), nil
+ }
+
+ // Verify that the `Region` of the instance trying to log in matches the
+ // `Region` specified as a constraint on the role
+ if roleEntry.BoundRegion != "" && identityDocParsed.Region != roleEntry.BoundRegion {
+ return logical.ErrorResponse(fmt.Sprintf("Region %q does not satisfy the constraint on role %q", identityDocParsed.Region, roleName)), nil
+ }
+
+ validationError, err := b.verifyInstanceMeetsRoleRequirements(req.Storage, instance, roleEntry, roleName, identityDocParsed)
+ if err != nil {
+ return nil, err
+ }
+ if validationError != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Error validating instance: %v", validationError)), nil
+ }
+
+ // Get the entry from the identity whitelist, if there is one
+ storedIdentity, err := whitelistIdentityEntry(req.Storage, identityDocParsed.InstanceID)
+ if err != nil {
+ return nil, err
+ }
+
+ // The disallowReauthentication value cached on the stored
+ // identity-whitelist entry is determined by more than just the role entry.
+ // If the client explicitly sets the nonce to be empty, it implies intent
+ // to disable reauthentication. Also, the role tag can override a 'false'
+ // value with 'true' (the other way around is not allowed).
+
+ // Read the value from the role entry
+ disallowReauthentication := roleEntry.DisallowReauthentication
+
+ clientNonce := ""
+
+ // Check if the nonce is supplied by the client
+ clientNonceRaw, clientNonceSupplied := data.GetOk("nonce")
+ if clientNonceSupplied {
+ clientNonce = clientNonceRaw.(string)
+
+ // Nonce explicitly set to empty implies intent to disable
+ // reauthentication by the client. Set a predefined nonce which
+ // indicates reauthentication being disabled.
+ if clientNonce == "" {
+ clientNonce = reauthenticationDisabledNonce
+
+ // Ensure that the intent lands in the whitelist
+ disallowReauthentication = true
+ }
+ }
+
+ // This is NOT a first login attempt from the client
+ if storedIdentity != nil {
+ // Check that the client nonce matches the cached nonce and that the
+ // pending time of the identity document is not before the pending time of
+ // the document with which the previous login was made. If 'allow_instance_migration'
+ // is enabled on the registered role, the client nonce requirement is relaxed.
+ if err = validateMetadata(clientNonce, identityDocParsed.PendingTime, storedIdentity, roleEntry); err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ // Don't let subsequent login attempts bypass the initial
+ // intent of disabling reauthentication, even if the role's
+ // properties are later updated. For example: the role has the
+ // value set to 'false', a role-tag login sets it to 'true', the
+ // role is then updated to not use a role tag, and a login attempt
+ // is made with the role's value set to 'false'. Removing the entry
+ // from the identity whitelist should be the only way to be
+ // able to log in from the instance again.
+ disallowReauthentication = disallowReauthentication || storedIdentity.DisallowReauthentication
+ }
+
+ // If we reach this point without erroring and the client nonce was not
+ // supplied, a first-time login is implied and the client intends the
+ // nonce to be generated by the backend. Create a random nonce to be
+ // associated with the instance ID.
+ if !clientNonceSupplied {
+ if clientNonce, err = uuid.GenerateUUID(); err != nil {
+ return nil, fmt.Errorf("failed to generate random nonce")
+ }
+ }
+
+ // Load the current values for max TTL and policies from the role entry,
+ // before checking for overriding max TTL in the role tag. The shortest
+ // max TTL is used to cap the token TTL; the longest max TTL is used to
+ // make the whitelist entry as long as possible as it controls for replay
+ // attacks.
+ shortestMaxTTL := b.System().MaxLeaseTTL()
+ longestMaxTTL := b.System().MaxLeaseTTL()
+ if roleEntry.MaxTTL > time.Duration(0) && roleEntry.MaxTTL < shortestMaxTTL {
+ shortestMaxTTL = roleEntry.MaxTTL
+ }
+ if roleEntry.MaxTTL > longestMaxTTL {
+ longestMaxTTL = roleEntry.MaxTTL
+ }
+
+ policies := roleEntry.Policies
+ rTagMaxTTL := time.Duration(0)
+ var roleTagResp *roleTagLoginResponse
+ if roleEntry.RoleTag != "" {
+ // Assign to the outer roleTagResp; ':=' would shadow it and silently drop the result.
+ roleTagResp, err = b.handleRoleTagLogin(req.Storage, roleName, roleEntry, instance)
+ if err != nil {
+ return nil, err
+ }
+ if roleTagResp == nil {
+ return logical.ErrorResponse("failed to fetch and verify the role tag"), nil
+ }
+ }
+
+ if roleTagResp != nil {
+ // Role tag is enabled on the role.
+
+ // Overwrite the policies with the ones returned from processing the role tag
+ // If there are no policies on the role tag, policies on the role are inherited.
+ // If policies on role tag are set, by this point, it is verified that it is a subset of the
+ // policies on the role. So, apply only those.
+ if len(roleTagResp.Policies) != 0 {
+ policies = roleTagResp.Policies
+ }
+
+ // If roleEntry had disallowReauthentication set to 'true', do not reset it
+ // to 'false' based on role tag having it not set. But, if role tag had it set,
+ // be sure to override the value.
+ if !disallowReauthentication {
+ disallowReauthentication = roleTagResp.DisallowReauthentication
+ }
+
+ // Cache the value of role tag's max_ttl value
+ rTagMaxTTL = roleTagResp.MaxTTL
+
+ // Scope the shortestMaxTTL to the value set on the role tag
+ if roleTagResp.MaxTTL > time.Duration(0) && roleTagResp.MaxTTL < shortestMaxTTL {
+ shortestMaxTTL = roleTagResp.MaxTTL
+ }
+ if roleTagResp.MaxTTL > longestMaxTTL {
+ longestMaxTTL = roleTagResp.MaxTTL
+ }
+ }
+
+ // Save the login attempt in the identity whitelist
+ currentTime := time.Now()
+ if storedIdentity == nil {
+ // Role, ClientNonce and CreationTime of the identity entry,
+ // once set, should never change.
+ storedIdentity = &whitelistIdentity{
+ Role: roleName,
+ ClientNonce: clientNonce,
+ CreationTime: currentTime,
+ }
+ }
+
+ // DisallowReauthentication, PendingTime, LastUpdatedTime and
+ // ExpirationTime may change.
+ storedIdentity.LastUpdatedTime = currentTime
+ storedIdentity.ExpirationTime = currentTime.Add(longestMaxTTL)
+ storedIdentity.PendingTime = identityDocParsed.PendingTime
+ storedIdentity.DisallowReauthentication = disallowReauthentication
+
+ // Don't cache the nonce if DisallowReauthentication is set
+ if storedIdentity.DisallowReauthentication {
+ storedIdentity.ClientNonce = ""
+ }
+
+ // Sanitize the nonce to a reasonable length
+ if len(clientNonce) > 128 && !storedIdentity.DisallowReauthentication {
+ return logical.ErrorResponse("client nonce exceeding the limit of 128 characters"), nil
+ }
+
+ if err = setWhitelistIdentityEntry(req.Storage, identityDocParsed.InstanceID, storedIdentity); err != nil {
+ return nil, err
+ }
+
+ resp := &logical.Response{
+ Auth: &logical.Auth{
+ Period: roleEntry.Period,
+ Policies: policies,
+ Metadata: map[string]string{
+ "instance_id": identityDocParsed.InstanceID,
+ "region": identityDocParsed.Region,
+ "account_id": identityDocParsed.AccountID,
+ "role_tag_max_ttl": rTagMaxTTL.String(),
+ "role": roleName,
+ "ami_id": identityDocParsed.AmiID,
+ },
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ TTL: roleEntry.TTL,
+ },
+ },
+ }
+
+ // Return the nonce only if reauthentication is allowed
+ if !disallowReauthentication {
+ // Echo the client nonce back. If nonce param was not supplied
+ // to the endpoint at all (setting it to empty string does not
+ // qualify here), callers should extract out the nonce from
+ // this field for reauthentication requests.
+ resp.Auth.Metadata["nonce"] = clientNonce
+ }
+
+ if roleEntry.Period > time.Duration(0) {
+ resp.Auth.TTL = roleEntry.Period
+ } else {
+ // Cap the TTL value.
+ shortestTTL := b.System().DefaultLeaseTTL()
+ if roleEntry.TTL > time.Duration(0) && roleEntry.TTL < shortestTTL {
+ shortestTTL = roleEntry.TTL
+ }
+ if shortestMaxTTL < shortestTTL {
+ resp.AddWarning(fmt.Sprintf("effective TTL of %q exceeded the effective max_ttl of %q; TTL value is capped appropriately", shortestTTL.String(), shortestMaxTTL.String()))
+ shortestTTL = shortestMaxTTL
+ }
+ resp.Auth.TTL = shortestTTL
+ }
+
+ return resp, nil
+
+}
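
To make the nonce contract above concrete, a hedged client-side sketch: reauthentication succeeds only when the nonce echoed in the first response is replayed on later logins. Assumes the vault api client and an "fmt" import; loginData is whatever payload the earlier examples built.

```go
// loginTwice performs an initial login, captures the backend-generated
// nonce from the auth metadata, and replays it on the second login.
func loginTwice(client *vault.Client, loginData map[string]interface{}) error {
	secret, err := client.Logical().Write("auth/aws-ec2/login", loginData)
	if err != nil {
		return err
	}
	if secret == nil || secret.Auth == nil {
		return fmt.Errorf("no auth data in login response")
	}
	if nonce, ok := secret.Auth.Metadata["nonce"]; ok {
		loginData["nonce"] = nonce // required for reauthentication
	}
	_, err = client.Logical().Write("auth/aws-ec2/login", loginData)
	return err
}
```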
+
+// handleRoleTagLogin fetches the role tag of the instance and verifies that
+// it is correct. The policies for the login request are then derived from
+// the role tag, provided certain criteria are satisfied.
+func (b *backend) handleRoleTagLogin(s logical.Storage, roleName string, roleEntry *awsRoleEntry, instance *ec2.Instance) (*roleTagLoginResponse, error) {
+ if roleEntry == nil {
+ return nil, fmt.Errorf("nil role entry")
+ }
+ if instance == nil {
+ return nil, fmt.Errorf("nil instance")
+ }
+
+ // Input validation on instance is not performed here considering
+ // that it would have been done in validateInstance method.
+ tags := instance.Tags
+ if tags == nil || len(tags) == 0 {
+ return nil, fmt.Errorf("missing tag with key %q on the instance", roleEntry.RoleTag)
+ }
+
+ // Iterate through the tags attached on the instance and look for
+ // a tag with its 'key' matching the expected role tag value.
+ rTagValue := ""
+ for _, tagItem := range tags {
+ if tagItem.Key != nil && *tagItem.Key == roleEntry.RoleTag {
+ rTagValue = *tagItem.Value
+ break
+ }
+ }
+
+ // If 'role_tag' is enabled on the role, and if a corresponding tag is not found
+ // to be attached to the instance, fail.
+ if rTagValue == "" {
+ return nil, fmt.Errorf("missing tag with key %q on the instance", roleEntry.RoleTag)
+ }
+
+ // Parse the role tag into a struct, extract the plaintext part of it and verify its HMAC
+ rTag, err := b.parseAndVerifyRoleTagValue(s, rTagValue)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check that the role name with which this login is being made is the
+ // same as the role name embedded in the tag.
+ if rTag.Role != roleName {
+ return nil, fmt.Errorf("role on the tag does not match the role supplied")
+ }
+
+ // If instance_id was set on the role tag, check if the same instance is attempting to login
+ if rTag.InstanceID != "" && rTag.InstanceID != *instance.InstanceId {
+ return nil, fmt.Errorf("role tag is being used by an unauthorized instance.")
+ }
+
+ // Check if the role tag is blacklisted
+ blacklistEntry, err := b.lockedBlacklistRoleTagEntry(s, rTagValue)
+ if err != nil {
+ return nil, err
+ }
+ if blacklistEntry != nil {
+ return nil, fmt.Errorf("role tag is blacklisted")
+ }
+
+ // Ensure that the policies on the RoleTag are a subset of the policies on the role
+ if !strutil.StrListSubset(roleEntry.Policies, rTag.Policies) {
+ return nil, fmt.Errorf("policies on the role tag must be subset of policies on the role")
+ }
+
+ return &roleTagLoginResponse{
+ Policies: rTag.Policies,
+ MaxTTL: rTag.MaxTTL,
+ DisallowReauthentication: rTag.DisallowReauthentication,
+ }, nil
+}
+
+// pathLoginRenew is used to renew an authenticated token
+func (b *backend) pathLoginRenew(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ authType, ok := req.Auth.Metadata["auth_type"]
+ if !ok {
+ // backwards compatibility for clients that have leases from before we added auth_type
+ authType = ec2AuthType
+ }
+
+ if authType == ec2AuthType {
+ return b.pathLoginRenewEc2(req, data)
+ } else if authType == iamAuthType {
+ return b.pathLoginRenewIam(req, data)
+ } else {
+ return nil, fmt.Errorf("unrecognized auth_type: %q", authType)
+ }
+}
+
+func (b *backend) pathLoginRenewIam(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ canonicalArn := req.Auth.Metadata["canonical_arn"]
+ if canonicalArn == "" {
+ return nil, fmt.Errorf("unable to retrieve canonical ARN from metadata during renewal")
+ }
+
+ roleName := req.Auth.InternalData["role_name"].(string)
+ if roleName == "" {
+ return nil, fmt.Errorf("error retrieving role_name during renewal")
+ }
+ roleEntry, err := b.lockedAWSRole(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if roleEntry == nil {
+ return nil, fmt.Errorf("role entry not found")
+ }
+
+ // If an inferred entity type was stored at login time, re-validate it here.
+ if entityType, ok := req.Auth.Metadata["inferred_entity_type"]; ok {
+ if entityType == ec2EntityType {
+ instanceID, ok := req.Auth.Metadata["inferred_entity_id"]
+ if !ok {
+ return nil, fmt.Errorf("no inferred entity ID in auth metadata")
+ }
+ instanceRegion, ok := req.Auth.Metadata["inferred_aws_region"]
+ if !ok {
+ return nil, fmt.Errorf("no inferred AWS region in auth metadata")
+ }
+ _, err := b.validateInstance(req.Storage, instanceID, instanceRegion, req.Auth.Metadata["account_id"])
+ if err != nil {
+ return nil, fmt.Errorf("failed to verify instance ID %q: %v", instanceID, err)
+ }
+ } else {
+ return nil, fmt.Errorf("unrecognized entity_type in metadata: %q", entityType)
+ }
+ }
+
+ if roleEntry.BoundIamPrincipalARN != canonicalArn {
+ return nil, fmt.Errorf("role no longer bound to arn %q", canonicalArn)
+ }
+
+ return framework.LeaseExtend(roleEntry.TTL, roleEntry.MaxTTL, b.System())(req, data)
+
+}
+
+func (b *backend) pathLoginRenewEc2(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ instanceID := req.Auth.Metadata["instance_id"]
+ if instanceID == "" {
+ return nil, fmt.Errorf("unable to fetch instance ID from metadata during renewal")
+ }
+
+ region := req.Auth.Metadata["region"]
+ if region == "" {
+ return nil, fmt.Errorf("unable to fetch region from metadata during renewal")
+ }
+
+ // Ensure backwards compatibility for older clients without account_id saved in metadata
+ accountID, ok := req.Auth.Metadata["account_id"]
+ if ok {
+ if accountID == "" {
+ return nil, fmt.Errorf("unable to fetch account_id from metadata during renewal")
+ }
+ }
+
+ // Cross check that the instance is still in 'running' state
+ _, err := b.validateInstance(req.Storage, instanceID, region, accountID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to verify instance ID %q: %q", instanceID, err)
+ }
+
+ storedIdentity, err := whitelistIdentityEntry(req.Storage, instanceID)
+ if err != nil {
+ return nil, err
+ }
+ if storedIdentity == nil {
+ return nil, fmt.Errorf("failed to verify the whitelist identity entry for instance ID: %q", instanceID)
+ }
+
+ // Ensure that role entry is not deleted
+ roleEntry, err := b.lockedAWSRole(req.Storage, storedIdentity.Role)
+ if err != nil {
+ return nil, err
+ }
+ if roleEntry == nil {
+ return nil, fmt.Errorf("role entry not found")
+ }
+
+ // If the login was made using a role tag, then the tag's max_ttl is
+ // cached in the auth metadata during login and used here to cap the
+ // max_ttl of the renewal.
+ rTagMaxTTL, err := time.ParseDuration(req.Auth.Metadata["role_tag_max_ttl"])
+ if err != nil {
+ return nil, err
+ }
+
+ // Re-evaluate the maxTTL bounds
+ shortestMaxTTL := b.System().MaxLeaseTTL()
+ longestMaxTTL := b.System().MaxLeaseTTL()
+ if roleEntry.MaxTTL > time.Duration(0) && roleEntry.MaxTTL < shortestMaxTTL {
+ shortestMaxTTL = roleEntry.MaxTTL
+ }
+ if roleEntry.MaxTTL > longestMaxTTL {
+ longestMaxTTL = roleEntry.MaxTTL
+ }
+ if rTagMaxTTL > time.Duration(0) && rTagMaxTTL < shortestMaxTTL {
+ shortestMaxTTL = rTagMaxTTL
+ }
+ if rTagMaxTTL > longestMaxTTL {
+ longestMaxTTL = rTagMaxTTL
+ }
+
+ // Only LastUpdatedTime and ExpirationTime change and all other fields remain the same
+ currentTime := time.Now()
+ storedIdentity.LastUpdatedTime = currentTime
+ storedIdentity.ExpirationTime = currentTime.Add(longestMaxTTL)
+
+ // Updating the expiration time is required for the tidy operation on the
+ // whitelist identity storage items
+ if err = setWhitelistIdentityEntry(req.Storage, instanceID, storedIdentity); err != nil {
+ return nil, err
+ }
+
+ // If 'Period' is set on the role, then the token should never expire. The
+ // role tag does not have a 'Period' field. So, regardless of whether the
+ // token was issued using a role login or a role-tag login, the period set
+ // on the role should take effect.
+ if roleEntry.Period > time.Duration(0) {
+ req.Auth.TTL = roleEntry.Period
+ return &logical.Response{Auth: req.Auth}, nil
+ } else {
+ // Cap the TTL value
+ shortestTTL := b.System().DefaultLeaseTTL()
+ if roleEntry.TTL > time.Duration(0) && roleEntry.TTL < shortestTTL {
+ shortestTTL = roleEntry.TTL
+ }
+ if shortestMaxTTL < shortestTTL {
+ shortestTTL = shortestMaxTTL
+ }
+ return framework.LeaseExtend(shortestTTL, shortestMaxTTL, b.System())(req, data)
+ }
+}
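
A worked sketch of the min/max bookkeeping above, under assumed values (system max TTL 768h, role max TTL 24h, role-tag max TTL 1h): the shortest cap limits the renewed token, the longest pushes out the whitelist entry's expiration. Assumes "fmt" and "time" imports.

```go
func exampleTTLCaps() {
	system := 768 * time.Hour
	role := 24 * time.Hour
	roleTag := 1 * time.Hour

	shortestMaxTTL, longestMaxTTL := system, system
	for _, d := range []time.Duration{role, roleTag} {
		if d > 0 && d < shortestMaxTTL {
			shortestMaxTTL = d // ends up 1h: caps the token TTL
		}
		if d > longestMaxTTL {
			longestMaxTTL = d // stays 768h: extends the whitelist entry
		}
	}
	fmt.Println(shortestMaxTTL, longestMaxTTL) // 1h0m0s 768h0m0s
}
```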
+
+func (b *backend) pathLoginUpdateIam(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+
+ method := data.Get("iam_http_request_method").(string)
+ if method == "" {
+ return logical.ErrorResponse("missing iam_http_request_method"), nil
+ }
+
+ // In the future, might consider supporting GET
+ if method != "POST" {
+ return logical.ErrorResponse("invalid iam_http_request_method; currently only 'POST' is supported"), nil
+ }
+
+ rawUrlB64 := data.Get("iam_request_url").(string)
+ if rawUrlB64 == "" {
+ return logical.ErrorResponse("missing iam_request_url"), nil
+ }
+ rawUrl, err := base64.StdEncoding.DecodeString(rawUrlB64)
+ if err != nil {
+ return logical.ErrorResponse("failed to base64 decode iam_request_url"), nil
+ }
+ parsedUrl, err := url.Parse(string(rawUrl))
+ if err != nil {
+ return logical.ErrorResponse("error parsing iam_request_url"), nil
+ }
+
+ // TODO: There are two potentially valid cases we're not yet supporting that would
+ // necessitate this check being changed. First, if we support GET requests.
+ // Second if we support presigned POST requests
+ bodyB64 := data.Get("iam_request_body").(string)
+ if bodyB64 == "" {
+ return logical.ErrorResponse("missing iam_request_body"), nil
+ }
+ bodyRaw, err := base64.StdEncoding.DecodeString(bodyB64)
+ if err != nil {
+ return logical.ErrorResponse("failed to base64 decode iam_request_body"), nil
+ }
+ body := string(bodyRaw)
+
+ headersB64 := data.Get("iam_request_headers").(string)
+ if headersB64 == "" {
+ return logical.ErrorResponse("missing iam_request_headers"), nil
+ }
+ headersJson, err := base64.StdEncoding.DecodeString(headersB64)
+ if err != nil {
+ return logical.ErrorResponse("failed to base64 decode iam_request_headers"), nil
+ }
+ var headers http.Header
+ err = jsonutil.DecodeJSON(headersJson, &headers)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("failed to JSON decode iam_request_headers %q: %v", headersJson, err)), nil
+ }
+
+ config, err := b.lockedClientConfigEntry(req.Storage)
+ if err != nil {
+ return logical.ErrorResponse("error getting configuration"), nil
+ }
+
+ endpoint := "https://sts.amazonaws.com"
+
+ if config != nil {
+ if config.IAMServerIdHeaderValue != "" {
+ err = validateVaultHeaderValue(headers, parsedUrl, config.IAMServerIdHeaderValue)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("error validating %s header: %v", iamServerIdHeader, err)), nil
+ }
+ }
+ if config.STSEndpoint != "" {
+ endpoint = config.STSEndpoint
+ }
+ }
+
+ clientArn, accountID, err := submitCallerIdentityRequest(method, endpoint, parsedUrl, body, headers)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("error making upstream request: %v", err)), nil
+ }
+ canonicalArn, principalName, sessionName, err := parseIamArn(clientArn)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Error parsing arn: %v", err)), nil
+ }
+
+ roleName := data.Get("role").(string)
+ if roleName == "" {
+ roleName = principalName
+ }
+
+ roleEntry, err := b.lockedAWSRole(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if roleEntry == nil {
+ return logical.ErrorResponse(fmt.Sprintf("entry for role %s not found", roleName)), nil
+ }
+
+ if roleEntry.AuthType != iamAuthType {
+ return logical.ErrorResponse(fmt.Sprintf("auth method iam not allowed for role %s", roleName)), nil
+ }
+
+ // The role creation should ensure that either we're inferring this is an EC2 instance
+ // or that we're binding an ARN
+ if roleEntry.BoundIamPrincipalARN != "" && roleEntry.BoundIamPrincipalARN != canonicalArn {
+ return logical.ErrorResponse(fmt.Sprintf("IAM Principal %q does not belong to the role %q", clientArn, roleName)), nil
+ }
+
+ policies := roleEntry.Policies
+
+ inferredEntityType := ""
+ inferredEntityId := ""
+ if roleEntry.InferredEntityType == ec2EntityType {
+ instance, err := b.validateInstance(req.Storage, sessionName, roleEntry.InferredAWSRegion, accountID)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("failed to verify %s as a valid EC2 instance in region %s", sessionName, roleEntry.InferredAWSRegion)), nil
+ }
+
+ // build a fake identity doc to pass on metadata about the instance to verifyInstanceMeetsRoleRequirements
+ identityDoc := &identityDocument{
+ Tags: nil, // Don't really need the tags, so not doing the work of converting them from Instance.Tags to identityDocument.Tags
+ InstanceID: *instance.InstanceId,
+ AmiID: *instance.ImageId,
+ AccountID: accountID,
+ Region: roleEntry.InferredAWSRegion,
+ PendingTime: instance.LaunchTime.Format(time.RFC3339),
+ }
+
+ validationError, err := b.verifyInstanceMeetsRoleRequirements(req.Storage, instance, roleEntry, roleName, identityDoc)
+ if err != nil {
+ return nil, err
+ }
+ if validationError != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Error validating instance: %s", validationError)), nil
+ }
+
+ inferredEntityType = ec2EntityType
+ inferredEntityId = sessionName
+ }
+
+ resp := &logical.Response{
+ Auth: &logical.Auth{
+ Policies: policies,
+ Metadata: map[string]string{
+ "client_arn": clientArn,
+ "canonical_arn": canonicalArn,
+ "auth_type": iamAuthType,
+ "inferred_entity_type": inferredEntityType,
+ "inferred_entity_id": inferredEntityId,
+ "inferred_aws_region": roleEntry.InferredAWSRegion,
+ "account_id": accountID,
+ },
+ InternalData: map[string]interface{}{
+ "role_name": roleName,
+ },
+ DisplayName: principalName,
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ TTL: roleEntry.TTL,
+ },
+ },
+ }
+
+ if roleEntry.Period > time.Duration(0) {
+ resp.Auth.TTL = roleEntry.Period
+ } else {
+ shortestTTL := b.System().DefaultLeaseTTL()
+ if roleEntry.TTL > time.Duration(0) && roleEntry.TTL < shortestTTL {
+ shortestTTL = roleEntry.TTL
+ }
+
+ maxTTL := b.System().MaxLeaseTTL()
+ if roleEntry.MaxTTL > time.Duration(0) && roleEntry.MaxTTL < maxTTL {
+ maxTTL = roleEntry.MaxTTL
+ }
+
+ if shortestTTL > maxTTL {
+ resp.AddWarning(fmt.Sprintf("Effective TTL of %q exceeded the effective max_ttl of %q; TTL value is capped accordingly", (shortestTTL / time.Second).String(), (maxTTL / time.Second).String()))
+ shortestTTL = maxTTL
+ }
+
+ resp.Auth.TTL = shortestTTL
+ }
+
+ return resp, nil
+}
+
+// These two methods (hasValuesFor*) return two bools:
+// the first is hasAll, i.e. whether the request has all the values
+// necessary for this auth method;
+// the second is hasAny, i.e. whether the request has any of the fields
+// exclusive to this auth method.
+func hasValuesForEc2Auth(data *framework.FieldData) (bool, bool) {
+ _, hasPkcs7 := data.GetOk("pkcs7")
+ _, hasIdentity := data.GetOk("identity")
+ _, hasSignature := data.GetOk("signature")
+ return (hasPkcs7 || (hasIdentity && hasSignature)), (hasPkcs7 || hasIdentity || hasSignature)
+}
+
+func hasValuesForIamAuth(data *framework.FieldData) (bool, bool) {
+ _, hasRequestMethod := data.GetOk("iam_http_request_method")
+ _, hasRequestUrl := data.GetOk("iam_request_url")
+ _, hasRequestBody := data.GetOk("iam_request_body")
+ _, hasRequestHeaders := data.GetOk("iam_request_headers")
+ return (hasRequestMethod && hasRequestUrl && hasRequestBody && hasRequestHeaders),
+ (hasRequestMethod || hasRequestUrl || hasRequestBody || hasRequestHeaders)
+}
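
The (hasAll, hasAny) pairs lend themselves to a simple dispatch: reject requests that mix fields from both methods or supply only part of one method's field set. A hypothetical sketch of such a dispatcher (the backend's actual login resolver is not shown in this diff):

    package main

    import "fmt"

    // chooseAuthMethod is a hypothetical dispatcher over the (hasAll, hasAny)
    // results of hasValuesForEc2Auth and hasValuesForIamAuth.
    func chooseAuthMethod(ec2All, ec2Any, iamAll, iamAny bool) (string, error) {
        switch {
        case ec2Any && iamAny:
            return "", fmt.Errorf("fields for both ec2 and iam auth methods supplied")
        case ec2All:
            return "ec2", nil
        case iamAll:
            return "iam", nil
        case ec2Any || iamAny:
            return "", fmt.Errorf("incomplete field set for the chosen auth method")
        default:
            return "", fmt.Errorf("no auth method fields supplied")
        }
    }

    func main() {
        // pkcs7 supplied alone: a complete ec2 field set.
        fmt.Println(chooseAuthMethod(true, true, false, false))
        // only iam_request_url supplied: incomplete.
        fmt.Println(chooseAuthMethod(false, false, false, true))
    }
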
+
+func parseIamArn(iamArn string) (string, string, string, error) {
+ // iamArn should look like one of the following:
+ // 1. arn:aws:iam::<account_id>:user/<UserName>
+ // 2. arn:aws:sts::<account_id>:assumed-role/<RoleName>/<RoleSessionName>
+ // if we get something like 2, then we want to transform that back to what
+ // most people would expect, which is arn:aws:iam::<account_id>:role/<RoleName>
+ fullParts := strings.Split(iamArn, ":")
+ principalFullName := fullParts[5]
+ // principalFullName would now be something like user/ or assumed-role//
+ parts := strings.Split(principalFullName, "/")
+ principalName := parts[1]
+ // now, principalName should either be or
+ transformedArn := iamArn
+ sessionName := ""
+ if parts[0] == "assumed-role" {
+ transformedArn = fmt.Sprintf("arn:aws:iam::%s:role/%s", fullParts[4], principalName)
+ // fullParts[4] is the
+ sessionName = parts[2]
+ // sessionName is
+ } else if parts[0] != "user" {
+ return "", "", "", fmt.Errorf("unrecognized principal type: %q", parts[0])
+ }
+ return transformedArn, principalName, sessionName, nil
+}
+
+func validateVaultHeaderValue(headers http.Header, requestUrl *url.URL, requiredHeaderValue string) error {
+ providedValue := ""
+ for k, v := range headers {
+ if strings.EqualFold(iamServerIdHeader, k) {
+ providedValue = strings.Join(v, ",")
+ break
+ }
+ }
+ if providedValue == "" {
+ return fmt.Errorf("didn't find %s", iamServerIdHeader)
+ }
+
+ // NOT doing a constant time compare here since the value is NOT intended to be secret
+ if providedValue != requiredHeaderValue {
+ return fmt.Errorf("expected %s but got %s", requiredHeaderValue, providedValue)
+ }
+
+ if authzHeaders, ok := headers["Authorization"]; ok {
+ // authzHeader looks like AWS4-HMAC-SHA256 Credential=AKI..., SignedHeaders=host;x-amz-date;x-vault-awsiam-id, Signature=...
+ // We need to extract out the SignedHeaders
+ re := regexp.MustCompile(".*SignedHeaders=([^,]+)")
+ authzHeader := strings.Join(authzHeaders, ",")
+ matches := re.FindSubmatch([]byte(authzHeader))
+ // FindSubmatch returns nil when there is no match; a successful match
+ // yields the full match plus the single capture group, i.e. length 2
+ if len(matches) != 2 {
+ return fmt.Errorf("vault header wasn't signed")
+ }
+ signedHeaders := string(matches[1])
+ return ensureHeaderIsSigned(signedHeaders, iamServerIdHeader)
+ }
+ // TODO: If we support GET requests, then we need to parse the X-Amz-SignedHeaders
+ // argument out of the query string and search in there for the header value
+ return fmt.Errorf("missing Authorization header")
+}
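
To see the SignedHeaders extraction above in isolation, here is a standalone sketch running the same regex against a representative SigV4 Authorization header:

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    func main() {
        authz := "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/sts/aws4_request, " +
            "SignedHeaders=host;x-amz-date;x-vault-aws-iam-server-id, Signature=abc123"
        re := regexp.MustCompile(".*SignedHeaders=([^,]+)")
        matches := re.FindStringSubmatch(authz)
        if matches == nil {
            panic("header wasn't signed")
        }
        // The capture group is the semicolon-separated, lowercased header list
        // that ensureHeaderIsSigned (below) iterates over.
        fmt.Println(matches[1])
        fmt.Println(strings.Split(matches[1], ";"))
    }
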
+
+func buildHttpRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) *http.Request {
+ // This is all a bit complicated because the AWS signature algorithm requires that
+ // the Host header be included in the signed headers. See
+ // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ // The use cases we want to support, in order of increasing complexity, are:
+ // 1. All defaults (client assumes sts.amazonaws.com and server has no override)
+ // 2. Alternate STS regions: client wants to go to a specific region, in which case
+ // Vault must be configured with that endpoint as well. The client's signed request
+ // will include a signature over what the client expects the Host header to be,
+ // so we cannot change that and must match.
+ // 3. Alternate STS regions with a proxy that is transparent to Vault's clients.
+ // In this case, Vault is aware of the proxy, as the proxy is configured as the
+ // endpoint, but the clients should NOT be aware of the proxy (because STS will
+ // not be aware of the proxy)
+ // It's also annoying because:
+ // 1. The AWS Sigv4 algorithm requires the Host header to be defined
+ // 2. Some of the official SDKs (at least botocore and aws-sdk-go) don't actually
+ // include an explicit Host header in the HTTP requests they generate, relying on
+ // the underlying HTTP library to do that for them.
+ // 3. To get a validly signed request, the SDKs check if a Host header has been set
+ // and, if not, add an inferred host header (based on the URI) to the internal
+ // data structure used for calculating the signature, but never actually expose
+ // that to clients. So then they just "hope" that the underlying library actually
+ // adds the right Host header which was included in the signature calculation.
+ // We could either require all Vault clients to explicitly add the Host header
+ // in the encoded request, or we could implicitly infer it from the URI.
+ // We choose to support both -- allow you to explicitly set a Host header, but if not,
+ // infer one from the URI.
+ // HOWEVER, we have to preserve the request URI portion of the client's
+ // URL because the GetCallerIdentity Action can be encoded in either the body
+ // or the URL. So, we need to rebuild the URL sent to the http library to have the
+ // custom, Vault-specified endpoint with the client-side request parameters.
+ targetUrl := fmt.Sprintf("%s/%s", endpoint, parsedUrl.RequestURI())
+ request, err := http.NewRequest(method, targetUrl, strings.NewReader(body))
+ if err != nil {
+ return nil
+ }
+ request.Host = parsedUrl.Host
+ for k, vals := range headers {
+ for _, val := range vals {
+ request.Header.Add(k, val)
+ }
+ }
+ return request
+}
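
For context, a client produces the four iam_request_* login fields by signing an sts:GetCallerIdentity request (typically via an AWS SDK) and base64-encoding its parts. A minimal sketch of the packaging step only; the Authorization and server-ID header values here are placeholders, and a real request needs a SigV4 signer to fill them in:

    package main

    import (
        "encoding/base64"
        "encoding/json"
        "fmt"
        "net/http"
    )

    func main() {
        method := "POST"
        reqURL := "https://sts.amazonaws.com/"
        body := "Action=GetCallerIdentity&Version=2011-06-15"
        // Placeholder headers: a real client obtains Authorization (and X-Amz-Date)
        // from a SigV4 signer, with the server-ID header included in SignedHeaders.
        headers := http.Header{
            "Content-Type":              []string{"application/x-www-form-urlencoded; charset=utf-8"},
            "X-Vault-AWS-IAM-Server-ID": []string{"vault.example.com"},
            "Authorization":             []string{"AWS4-HMAC-SHA256 Credential=..., SignedHeaders=..., Signature=..."},
        }
        headersJSON, err := json.Marshal(headers)
        if err != nil {
            panic(err)
        }

        loginData := map[string]interface{}{
            "iam_http_request_method": method,
            "iam_request_url":         base64.StdEncoding.EncodeToString([]byte(reqURL)),
            "iam_request_body":        base64.StdEncoding.EncodeToString([]byte(body)),
            "iam_request_headers":     base64.StdEncoding.EncodeToString(headersJSON),
        }
        fmt.Println(loginData)
    }
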
+
+func ensureHeaderIsSigned(signedHeaders, headerToSign string) error {
+ // Not doing a constant time compare here, the values aren't secret
+ for _, header := range strings.Split(signedHeaders, ";") {
+ if header == strings.ToLower(headerToSign) {
+ return nil
+ }
+ }
+ return fmt.Errorf("vault header wasn't signed")
+}
+
+func parseGetCallerIdentityResponse(response string) (GetCallerIdentityResponse, error) {
+ decoder := xml.NewDecoder(strings.NewReader(response))
+ result := GetCallerIdentityResponse{}
+ err := decoder.Decode(&result)
+ return result, err
+}
+
+func submitCallerIdentityRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) (string, string, error) {
+ // NOTE: We need to ensure we're calling STS, instead of acting as an unintended network proxy
+ // The protection against this is that this method will only call the endpoint specified in the
+ // client config (defaulting to sts.amazonaws.com), so it would require a Vault admin to override
+ // the endpoint to talk to alternate web addresses
+ request := buildHttpRequest(method, endpoint, parsedUrl, body, headers)
+ client := cleanhttp.DefaultClient()
+ response, err := client.Do(request)
+ if err != nil {
+ return "", "", fmt.Errorf("error making request: %v", err)
+ }
+ if response != nil {
+ defer response.Body.Close()
+ }
+ // read the body first so that error responses can include it
+ responseBody, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return "", "", fmt.Errorf("error reading STS response body: %v", err)
+ }
+ if response.StatusCode != 200 {
+ return "", "", fmt.Errorf("received error code %d from STS: %s", response.StatusCode, string(responseBody))
+ }
+ callerIdentityResponse, err := parseGetCallerIdentityResponse(string(responseBody))
+ if err != nil {
+ return "", "", fmt.Errorf("error parsing STS response: %v", err)
+ }
+ if len(callerIdentityResponse.GetCallerIdentityResult) != 1 {
+ return "", "", fmt.Errorf("expected a single GetCallerIdentityResult in the STS response, got %d", len(callerIdentityResponse.GetCallerIdentityResult))
+ }
+ clientArn := callerIdentityResponse.GetCallerIdentityResult[0].Arn
+ if clientArn == "" {
+ return "", "", fmt.Errorf("no ARN validated")
+ }
+ return clientArn, callerIdentityResponse.GetCallerIdentityResult[0].Account, nil
+}
+
+type GetCallerIdentityResponse struct {
+ XMLName xml.Name `xml:"GetCallerIdentityResponse"`
+ GetCallerIdentityResult []GetCallerIdentityResult `xml:"GetCallerIdentityResult"`
+ ResponseMetadata []ResponseMetadata `xml:"ResponseMetadata"`
+}
+
+type GetCallerIdentityResult struct {
+ Arn string `xml:"Arn"`
+ UserId string `xml:"UserId"`
+ Account string `xml:"Account"`
+}
+
+type ResponseMetadata struct {
+ RequestId string `xml:"RequestId"`
+}
+
+// identityDocument represents the items of interest from the EC2 instance
+// identity document
+type identityDocument struct {
+ Tags map[string]interface{} `json:"tags,omitempty" structs:"tags" mapstructure:"tags"`
+ InstanceID string `json:"instanceId,omitempty" structs:"instanceId" mapstructure:"instanceId"`
+ AmiID string `json:"imageId,omitempty" structs:"imageId" mapstructure:"imageId"`
+ AccountID string `json:"accountId,omitempty" structs:"accountId" mapstructure:"accountId"`
+ Region string `json:"region,omitempty" structs:"region" mapstructure:"region"`
+ PendingTime string `json:"pendingTime,omitempty" structs:"pendingTime" mapstructure:"pendingTime"`
+}
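
Since the json tags above mirror the field names of the EC2 instance identity document, a trimmed sample decodes directly. A self-contained sketch (the real document carries more fields, which a decode into this struct simply ignores):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Same json tags as the vendored struct; Tags omitted for brevity.
    type identityDocument struct {
        InstanceID  string `json:"instanceId,omitempty"`
        AmiID       string `json:"imageId,omitempty"`
        AccountID   string `json:"accountId,omitempty"`
        Region      string `json:"region,omitempty"`
        PendingTime string `json:"pendingTime,omitempty"`
    }

    func main() {
        doc := []byte(`{"instanceId":"i-0123456789abcdef0","imageId":"ami-12345678","accountId":"123456789012","region":"us-east-1","pendingTime":"2017-05-01T12:00:00Z"}`)
        var id identityDocument
        if err := json.Unmarshal(doc, &id); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", id)
    }
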
+
+// roleTagLoginResponse represents the return values required after the process
+// of verifying a role tag login
+type roleTagLoginResponse struct {
+ Policies []string `json:"policies" structs:"policies" mapstructure:"policies"`
+ MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
+ DisallowReauthentication bool `json:"disallow_reauthentication" structs:"disallow_reauthentication" mapstructure:"disallow_reauthentication"`
+}
+
+const iamServerIdHeader = "X-Vault-AWS-IAM-Server-ID"
+
+const pathLoginSyn = `
+Authenticates an EC2 instance with Vault.
+`
+
+const pathLoginDesc = `
+Authenticate AWS entities, either an arbitrary IAM principal or EC2 instances.
+
+IAM principals are authenticated by processing a signed sts:GetCallerIdentity
+request and then parsing the response to see who signed the request. Optionally,
+the caller can be inferred to be another AWS entity type, with EC2 instances
+the only currently supported entity type, and additional filtering can be
+implemented based on that inferred type.
+
+An EC2 instance is authenticated using the PKCS#7 signature of the instance identity
+document and a client-created nonce. This nonce should be unique and should be used by
+the instance for all future logins, unless the 'disallow_reauthentication' option on the
+registered role is enabled, in which case the client nonce is optional.
+
+The first login attempt creates a whitelist entry in Vault, associating the instance
+with the nonce provided. All future logins will succeed only if the client nonce matches
+the nonce in the whitelisted entry.
+
+By default, a periodic tidy task looks for expired entries in the whitelist and deletes
+them; it runs once an hour. This interval can be configured using the
+'config/tidy/identities' endpoint, and the tidy action can also be triggered directly
+via the API, using the 'tidy/identities' endpoint.
+`
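
A sketch of the EC2 login flow described above, as seen from a client using the vendored vault/api package. It assumes the backend is mounted at auth/aws-ec2; the pkcs7 value is a placeholder for the signature read from the instance metadata service, and nonce is the client-chosen value the whitelist entry is keyed on:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        // VAULT_ADDR is read from the environment by DefaultConfig.
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        secret, err := client.Logical().Write("auth/aws-ec2/login", map[string]interface{}{
            "role":  "dev-role",
            "pkcs7": "<PKCS#7 signature of the instance identity document>",
            "nonce": "5defbf9e-a8f9-3063-bdfc-54b7a42a1f95",
        })
        if err != nil {
            log.Fatal(err)
        }
        if secret == nil || secret.Auth == nil {
            log.Fatal("no auth info returned")
        }
        fmt.Println(secret.Auth.ClientToken)
    }
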
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go
new file mode 100644
index 0000000..e96bed8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go
@@ -0,0 +1,140 @@
+package awsauth
+
+import (
+ "net/http"
+ "net/url"
+ "testing"
+)
+
+func TestBackend_pathLogin_getCallerIdentityResponse(t *testing.T) {
+ responseFromUser := `<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+<GetCallerIdentityResult>
+ <Arn>arn:aws:iam::123456789012:user/MyUserName</Arn>
+ <UserId>ASOMETHINGSOMETHINGSOMETHING</UserId>
+ <Account>123456789012</Account>
+</GetCallerIdentityResult>
+<ResponseMetadata>
+ <RequestId>7f4fc40c-853a-11e6-8848-8d035d01eb87</RequestId>
+</ResponseMetadata>
+</GetCallerIdentityResponse>`
+ expectedUserArn := "arn:aws:iam::123456789012:user/MyUserName"
+
+ responseFromAssumedRole := `<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+<GetCallerIdentityResult>
+ <Arn>arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName</Arn>
+ <UserId>ASOMETHINGSOMETHINGELSE:RoleSessionName</UserId>
+ <Account>123456789012</Account>
+</GetCallerIdentityResult>
+<ResponseMetadata>
+ <RequestId>7f4fc40c-853a-11e6-8848-8d035d01eb87</RequestId>
+</ResponseMetadata>
+</GetCallerIdentityResponse>`
+ expectedRoleArn := "arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName"
+
+ parsedUserResponse, err := parseGetCallerIdentityResponse(responseFromUser)
+ if err != nil {
+ t.Fatalf("error parsing user response: %v", err)
+ }
+ if parsed_arn := parsedUserResponse.GetCallerIdentityResult[0].Arn; parsed_arn != expectedUserArn {
+ t.Errorf("expected to parse arn %#v, got %#v", expectedUserArn, parsed_arn)
+ }
+
+ parsedRoleResponse, err := parseGetCallerIdentityResponse(responseFromAssumedRole)
+ if err != nil {
+ t.Fatalf("error parsing assumed-role response: %v", err)
+ }
+ if parsed_arn := parsedRoleResponse.GetCallerIdentityResult[0].Arn; parsed_arn != expectedRoleArn {
+ t.Errorf("expected to parse arn %#v; got %#v", expectedRoleArn, parsed_arn)
+ }
+
+ _, err = parseGetCallerIdentityResponse("SomeRandomGibberish")
+ if err == nil {
+ t.Errorf("expected to NOT parse random giberish, but didn't get an error")
+ }
+}
+
+func TestBackend_pathLogin_parseIamArn(t *testing.T) {
+ userArn := "arn:aws:iam::123456789012:user/MyUserName"
+ assumedRoleArn := "arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName"
+ baseRoleArn := "arn:aws:iam::123456789012:role/RoleName"
+
+ xformedUser, principalFriendlyName, sessionName, err := parseIamArn(userArn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if xformedUser != userArn {
+ t.Fatalf("expected to transform ARN %#v into %#v but got %#v instead", userArn, userArn, xformedUser)
+ }
+ if principalFriendlyName != "MyUserName" {
+ t.Fatalf("expected to extract MyUserName from ARN %#v but got %#v instead", userArn, principalFriendlyName)
+ }
+ if sessionName != "" {
+ t.Fatalf("expected to extract no session name from ARN %#v but got %#v instead", userArn, sessionName)
+ }
+
+ xformedRole, principalFriendlyName, sessionName, err := parseIamArn(assumedRoleArn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if xformedRole != baseRoleArn {
+ t.Fatalf("expected to transform ARN %#v into %#v but got %#v instead", assumedRoleArn, baseRoleArn, xformedRole)
+ }
+ if principalFriendlyName != "RoleName" {
+ t.Fatalf("expected to extract principal name of RoleName from ARN %#v but got %#v instead", assumedRoleArn, sessionName)
+ }
+ if sessionName != "RoleSessionName" {
+ t.Fatalf("expected to extract role session name of RoleSessionName from ARN %#v but got %#v instead", assumedRoleArn, sessionName)
+ }
+}
+
+func TestBackend_validateVaultHeaderValue(t *testing.T) {
+ const canaryHeaderValue = "Vault-Server"
+ requestUrl, err := url.Parse("https://sts.amazonaws.com/")
+ if err != nil {
+ t.Fatalf("error parsing test URL: %v", err)
+ }
+ postHeadersMissing := http.Header{
+ "Host": []string{"Foo"},
+ "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
+ }
+ postHeadersInvalid := http.Header{
+ "Host": []string{"Foo"},
+ iamServerIdHeader: []string{"InvalidValue"},
+ "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
+ }
+ postHeadersUnsigned := http.Header{
+ "Host": []string{"Foo"},
+ iamServerIdHeader: []string{canaryHeaderValue},
+ "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
+ }
+ postHeadersValid := http.Header{
+ "Host": []string{"Foo"},
+ iamServerIdHeader: []string{canaryHeaderValue},
+ "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
+ }
+
+ postHeadersSplit := http.Header{
+ "Host": []string{"Foo"},
+ iamServerIdHeader: []string{canaryHeaderValue},
+ "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request", "SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
+ }
+
+ err = validateVaultHeaderValue(postHeadersMissing, requestUrl, canaryHeaderValue)
+ if err == nil {
+ t.Error("validated POST request with missing Vault header")
+ }
+
+ err = validateVaultHeaderValue(postHeadersInvalid, requestUrl, canaryHeaderValue)
+ if err == nil {
+ t.Error("validated POST request with invalid Vault header value")
+ }
+
+ err = validateVaultHeaderValue(postHeadersUnsigned, requestUrl, canaryHeaderValue)
+ if err == nil {
+ t.Error("validated POST request with unsigned Vault header")
+ }
+
+ err = validateVaultHeaderValue(postHeadersValid, requestUrl, canaryHeaderValue)
+ if err != nil {
+ t.Errorf("did NOT validate valid POST request: %v", err)
+ }
+
+ err = validateVaultHeaderValue(postHeadersSplit, requestUrl, canaryHeaderValue)
+ if err != nil {
+ t.Errorf("did NOT validate valid POST request with split Authorization header: %v", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go
new file mode 100644
index 0000000..f6c19f2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go
@@ -0,0 +1,732 @@
+package awsauth
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathRole(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role"),
+ Fields: map[string]*framework.FieldSchema{
+ "role": {
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "auth_type": {
+ Type: framework.TypeString,
+ Description: `The auth_type permitted to authenticate to this role. Must be one of
+iam or ec2 and cannot be changed after role creation.`,
+ },
+ "bound_ami_id": {
+ Type: framework.TypeString,
+ Description: `If set, defines a constraint on the EC2 instances, requiring them to be
+using the AMI ID specified by this parameter.`,
+ },
+ "bound_account_id": {
+ Type: framework.TypeString,
+ Description: `If set, defines a constraint on the EC2 instances, requiring the account ID
+in their identity documents to match the one specified by this parameter.`,
+ },
+ "bound_iam_principal_arn": {
+ Type: framework.TypeString,
+ Description: `ARN of the IAM principal to bind to this role. Only applicable when
+auth_type is iam.`,
+ },
+ "bound_region": {
+ Type: framework.TypeString,
+ Description: `If set, defines a constraint on the EC2 instances, requiring the region in
+their identity documents to match the one specified by this parameter. Only applicable when
+auth_type is ec2.`,
+ },
+ "bound_iam_role_arn": {
+ Type: framework.TypeString,
+ Description: `If set, defines a constraint on the authenticating EC2 instance
+that it must match the IAM role ARN specified by this parameter.
+The value is prefix-matched (as though it were a glob ending in
+'*'). The configured IAM user or EC2 instance role must be allowed
+to execute the 'iam:GetInstanceProfile' action if this is
+specified. This is only checked when auth_type is
+ec2.`,
+ },
+ "bound_iam_instance_profile_arn": {
+ Type: framework.TypeString,
+ Description: `If set, defines a constraint on the EC2 instances to be associated
+with an IAM instance profile ARN which has a prefix that matches
+the value specified by this parameter. The value is prefix-matched
+(as though it were a glob ending in '*'). This is only checked when
+auth_type is ec2.`,
+ },
+ "inferred_entity_type": {
+ Type: framework.TypeString,
+ Description: `When auth_type is iam, the
+AWS entity type to infer from the authenticated principal. The only supported
+value is ec2_instance, which will extract the EC2 instance ID from the
+authenticated role and apply the following restrictions specific to EC2
+instances: bound_ami_id, bound_account_id, bound_iam_role_arn,
+bound_iam_instance_profile_arn, bound_vpc_id, bound_subnet_id. The configured
+EC2 client must be able to find the inferred instance ID in the results, and the
+instance must be running. If unable to determine the EC2 instance ID or unable
+to find the EC2 instance ID among running instances, then authentication will
+fail.`,
+ },
+ "inferred_aws_region": {
+ Type: framework.TypeString,
+ Description: `When auth_type is iam and
+inferred_entity_type is set, the region to assume the inferred entity exists in.`,
+ },
+ "bound_vpc_id": {
+ Type: framework.TypeString,
+ Description: `
+If set, defines a constraint on the EC2 instance to be associated with the VPC
+ID that matches the value specified by this parameter.`,
+ },
+ "bound_subnet_id": {
+ Type: framework.TypeString,
+ Description: `
+If set, defines a constraint on the EC2 instance to be associated with the
+subnet ID that matches the value specified by this parameter.`,
+ },
+ "role_tag": {
+ Type: framework.TypeString,
+ Default: "",
+ Description: `If set, enables the role tags for this role. The value set for this
+field should be the 'key' of the tag on the EC2 instance. The 'value'
+of the tag should be generated using the 'role/<role>/tag' endpoint.
+Defaults to an empty string, meaning that role tags are disabled. This
+is only allowed if auth_type is ec2.`,
+ },
+ "period": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 0,
+ Description: `
+If set, indicates that the token generated using this role should never expire. The token should be renewed within the duration specified by this value. At each renewal, the token's TTL will be set to the value of this parameter.`,
+ },
+ "ttl": {
+ Type: framework.TypeDurationSecond,
+ Default: 0,
+ Description: `Duration in seconds after which the issued token should expire. Defaults
+to 0, in which case the value will fallback to the system/mount defaults.`,
+ },
+ "max_ttl": {
+ Type: framework.TypeDurationSecond,
+ Default: 0,
+ Description: "The maximum allowed lifetime of tokens issued using this role.",
+ },
+ "policies": {
+ Type: framework.TypeString,
+ Default: "default",
+ Description: "Policies to be set on tokens issued using this role.",
+ },
+ "allow_instance_migration": {
+ Type: framework.TypeBool,
+ Default: false,
+ Description: `If set, allows migration of the underlying instance where the client
+resides. This keys off of pendingTime in the metadata document, so
+essentially, this disables the client nonce check whenever the
+instance is migrated to a new host and pendingTime is newer than the
+previously-remembered time. Use with caution. This is only checked when
+auth_type is ec2.`,
+ },
+ "disallow_reauthentication": {
+ Type: framework.TypeBool,
+ Default: false,
+ Description: "If set, only allows a single token to be granted per instance ID. In order to perform a fresh login, the entry in whitelist for the instance ID needs to be cleared using 'auth/aws-ec2/identity-whitelist/' endpoint.",
+ },
+ },
+
+ ExistenceCheck: b.pathRoleExistenceCheck,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.CreateOperation: b.pathRoleCreateUpdate,
+ logical.UpdateOperation: b.pathRoleCreateUpdate,
+ logical.ReadOperation: b.pathRoleRead,
+ logical.DeleteOperation: b.pathRoleDelete,
+ },
+
+ HelpSynopsis: pathRoleSyn,
+ HelpDescription: pathRoleDesc,
+ }
+}
+
+func pathListRole(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "role/?",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList,
+ },
+
+ HelpSynopsis: pathListRolesHelpSyn,
+ HelpDescription: pathListRolesHelpDesc,
+ }
+}
+
+func pathListRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/?",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList,
+ },
+
+ HelpSynopsis: pathListRolesHelpSyn,
+ HelpDescription: pathListRolesHelpDesc,
+ }
+}
+
+// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
+// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
+func (b *backend) pathRoleExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
+ entry, err := b.lockedAWSRole(req.Storage, strings.ToLower(data.Get("role").(string)))
+ if err != nil {
+ return false, err
+ }
+ return entry != nil, nil
+}
+
+// lockedAWSRole returns the properties set on the given role. This method
+// acquires the read lock before reading the role from the storage.
+func (b *backend) lockedAWSRole(s logical.Storage, roleName string) (*awsRoleEntry, error) {
+ if roleName == "" {
+ return nil, fmt.Errorf("missing role name")
+ }
+
+ b.roleMutex.RLock()
+ roleEntry, err := b.nonLockedAWSRole(s, roleName)
+ // we manually unlock rather than defer the unlock because we might need to grab
+ // a read/write lock in the upgrade path
+ b.roleMutex.RUnlock()
+ if err != nil {
+ return nil, err
+ }
+ if roleEntry == nil {
+ return nil, nil
+ }
+ needUpgrade, err := upgradeRoleEntry(roleEntry)
+ if err != nil {
+ return nil, fmt.Errorf("error upgrading roleEntry: %v", err)
+ }
+ if needUpgrade {
+ b.roleMutex.Lock()
+ defer b.roleMutex.Unlock()
+ // Now that we have a R/W lock, we need to re-read the role entry in case it was
+ // written to between releasing the read lock and acquiring the write lock
+ roleEntry, err = b.nonLockedAWSRole(s, roleName)
+ if err != nil {
+ return nil, err
+ }
+ // somebody deleted the role, so no use in putting it back
+ if roleEntry == nil {
+ return nil, nil
+ }
+ // now re-check to see if we need to upgrade
+ if needUpgrade, err = upgradeRoleEntry(roleEntry); err != nil {
+ return nil, fmt.Errorf("error upgrading roleEntry: %v", err)
+ }
+ if needUpgrade {
+ if err = b.nonLockedSetAWSRole(s, roleName, roleEntry); err != nil {
+ return nil, fmt.Errorf("error saving upgraded roleEntry: %v", err)
+ }
+ }
+ }
+ return roleEntry, nil
+}
+
+// lockedSetAWSRole creates or updates a role in the storage. This method
+// acquires the write lock before creating or updating the role at the storage.
+func (b *backend) lockedSetAWSRole(s logical.Storage, roleName string, roleEntry *awsRoleEntry) error {
+ if roleName == "" {
+ return fmt.Errorf("missing role name")
+ }
+
+ if roleEntry == nil {
+ return fmt.Errorf("nil role entry")
+ }
+
+ b.roleMutex.Lock()
+ defer b.roleMutex.Unlock()
+
+ return b.nonLockedSetAWSRole(s, roleName, roleEntry)
+}
+
+// nonLockedSetAWSRole creates or updates a role in the storage. This method
+// does not acquire the write lock before reading the role from the storage. If
+// locking is desired, use lockedSetAWSRole instead.
+func (b *backend) nonLockedSetAWSRole(s logical.Storage, roleName string,
+ roleEntry *awsRoleEntry) error {
+ if roleName == "" {
+ return fmt.Errorf("missing role name")
+ }
+
+ if roleEntry == nil {
+ return fmt.Errorf("nil role entry")
+ }
+
+ entry, err := logical.StorageEntryJSON("role/"+strings.ToLower(roleName), roleEntry)
+ if err != nil {
+ return err
+ }
+
+ if err := s.Put(entry); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// If needed, updates the role entry and returns a bool indicating if it was updated
+// (and thus needs to be persisted)
+func upgradeRoleEntry(roleEntry *awsRoleEntry) (bool, error) {
+ if roleEntry == nil {
+ return false, fmt.Errorf("received nil roleEntry")
+ }
+ var upgraded bool
+ // Check if the value held by role ARN field is actually an instance profile ARN
+ if roleEntry.BoundIamRoleARN != "" && strings.Contains(roleEntry.BoundIamRoleARN, ":instance-profile/") {
+ // If yes, move it to the correct field
+ roleEntry.BoundIamInstanceProfileARN = roleEntry.BoundIamRoleARN
+
+ // Reset the old field
+ roleEntry.BoundIamRoleARN = ""
+
+ upgraded = true
+ }
+
+ // Check if there was no pre-existing AuthType set (from older versions)
+ if roleEntry.AuthType == "" {
+ // then default to the original behavior of ec2
+ roleEntry.AuthType = ec2AuthType
+ upgraded = true
+ }
+
+ return upgraded, nil
+}
+
+// nonLockedAWSRole returns the properties set on the given role. This method
+// does not acquire the read lock before reading the role from the storage. If
+// locking is desired, use lockedAWSRole instead.
+// This method also does NOT check to see if a role upgrade is required. It is
+// the responsibility of the caller to check if a role upgrade is required and,
+// if so, to upgrade the role
+func (b *backend) nonLockedAWSRole(s logical.Storage, roleName string) (*awsRoleEntry, error) {
+ if roleName == "" {
+ return nil, fmt.Errorf("missing role name")
+ }
+
+ entry, err := s.Get("role/" + strings.ToLower(roleName))
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result awsRoleEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+// pathRoleDelete is used to delete the information registered for a given role.
+func (b *backend) pathRoleDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role"), nil
+ }
+
+ b.roleMutex.Lock()
+ defer b.roleMutex.Unlock()
+
+ return nil, req.Storage.Delete("role/" + strings.ToLower(roleName))
+}
+
+// pathRoleList is used to list all the roles registered with the backend.
+func (b *backend) pathRoleList(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.roleMutex.RLock()
+ defer b.roleMutex.RUnlock()
+
+ roles, err := req.Storage.List("role/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(roles), nil
+}
+
+// pathRoleRead is used to view the information registered for a given role.
+func (b *backend) pathRoleRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleEntry, err := b.lockedAWSRole(req.Storage, strings.ToLower(data.Get("role").(string)))
+ if err != nil {
+ return nil, err
+ }
+ if roleEntry == nil {
+ return nil, nil
+ }
+
+ // Prepare the map of all the entries in the roleEntry.
+ respData := structs.New(roleEntry).Map()
+
+ // HMAC key belonging to the role should NOT be exported.
+ delete(respData, "hmac_key")
+
+ // Display all the durations in seconds
+ respData["ttl"] = roleEntry.TTL / time.Second
+ respData["max_ttl"] = roleEntry.MaxTTL / time.Second
+ respData["period"] = roleEntry.Period / time.Second
+
+ return &logical.Response{
+ Data: respData,
+ }, nil
+}
+
+// pathRoleCreateUpdate is used to register a role and associate Vault policies with it.
+func (b *backend) pathRoleCreateUpdate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+
+ roleName := strings.ToLower(data.Get("role").(string))
+ if roleName == "" {
+ return logical.ErrorResponse("missing role"), nil
+ }
+
+ b.roleMutex.Lock()
+ defer b.roleMutex.Unlock()
+
+ roleEntry, err := b.nonLockedAWSRole(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if roleEntry == nil {
+ roleEntry = &awsRoleEntry{}
+ } else {
+ needUpdate, err := upgradeRoleEntry(roleEntry)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("failed to update roleEntry: %v", err)), nil
+ }
+ if needUpdate {
+ err = b.nonLockedSetAWSRole(req.Storage, roleName, roleEntry)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("failed to save upgraded roleEntry: %v", err)), nil
+ }
+ }
+ }
+
+ // Fetch and set the bound parameters. There can't be default values
+ // for these.
+ if boundAmiIDRaw, ok := data.GetOk("bound_ami_id"); ok {
+ roleEntry.BoundAmiID = boundAmiIDRaw.(string)
+ }
+
+ if boundAccountIDRaw, ok := data.GetOk("bound_account_id"); ok {
+ roleEntry.BoundAccountID = boundAccountIDRaw.(string)
+ }
+
+ if boundRegionRaw, ok := data.GetOk("bound_region"); ok {
+ roleEntry.BoundRegion = boundRegionRaw.(string)
+ }
+
+ if boundVpcIDRaw, ok := data.GetOk("bound_vpc_id"); ok {
+ roleEntry.BoundVpcID = boundVpcIDRaw.(string)
+ }
+
+ if boundSubnetIDRaw, ok := data.GetOk("bound_subnet_id"); ok {
+ roleEntry.BoundSubnetID = boundSubnetIDRaw.(string)
+ }
+
+ if boundIamRoleARNRaw, ok := data.GetOk("bound_iam_role_arn"); ok {
+ roleEntry.BoundIamRoleARN = boundIamRoleARNRaw.(string)
+ }
+
+ if boundIamInstanceProfileARNRaw, ok := data.GetOk("bound_iam_instance_profile_arn"); ok {
+ roleEntry.BoundIamInstanceProfileARN = boundIamInstanceProfileARNRaw.(string)
+ }
+
+ if boundIamPrincipalARNRaw, ok := data.GetOk("bound_iam_principal_arn"); ok {
+ roleEntry.BoundIamPrincipalARN = boundIamPrincipalARNRaw.(string)
+ }
+
+ if inferRoleTypeRaw, ok := data.GetOk("inferred_entity_type"); ok {
+ roleEntry.InferredEntityType = inferRoleTypeRaw.(string)
+ }
+
+ if inferredAWSRegionRaw, ok := data.GetOk("inferred_aws_region"); ok {
+ roleEntry.InferredAWSRegion = inferredAWSRegionRaw.(string)
+ }
+
+ // auth_type is a special case as it's immutable and can't be changed once a role is created
+ if authTypeRaw, ok := data.GetOk("auth_type"); ok {
+ // roleEntry.AuthType should only be "" when it's a new role; existing roles without an
+ // auth_type should have already been upgraded to have one before we get here
+ if roleEntry.AuthType == "" {
+ switch authTypeRaw.(string) {
+ case ec2AuthType, iamAuthType:
+ roleEntry.AuthType = authTypeRaw.(string)
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("unrecognized auth_type: %v", authTypeRaw.(string))), nil
+ }
+ } else if authTypeRaw.(string) != roleEntry.AuthType {
+ return logical.ErrorResponse("changing auth_type on a role is not allowed"), nil
+ }
+ } else if req.Operation == logical.CreateOperation {
+ switch req.MountType {
+ // maintain backwards compatibility for old aws-ec2 auth types
+ case "aws-ec2":
+ roleEntry.AuthType = ec2AuthType
+ // but default to iamAuth for new mounts going forward
+ case "aws":
+ roleEntry.AuthType = iamAuthType
+ default:
+ roleEntry.AuthType = iamAuthType
+ }
+ }
+
+ allowEc2Binds := roleEntry.AuthType == ec2AuthType
+
+ if roleEntry.InferredEntityType != "" {
+ switch {
+ case roleEntry.AuthType != iamAuthType:
+ return logical.ErrorResponse("specified inferred_entity_type but didn't allow iam auth_type"), nil
+ case roleEntry.InferredEntityType != ec2EntityType:
+ return logical.ErrorResponse(fmt.Sprintf("specified invalid inferred_entity_type: %s", roleEntry.InferredEntityType)), nil
+ case roleEntry.InferredAWSRegion == "":
+ return logical.ErrorResponse("specified inferred_entity_type but not inferred_aws_region"), nil
+ }
+ allowEc2Binds = true
+ } else if roleEntry.InferredAWSRegion != "" {
+ return logical.ErrorResponse("specified inferred_aws_region but not inferred_entity_type"), nil
+ }
+
+ numBinds := 0
+
+ if roleEntry.BoundAccountID != "" {
+ if !allowEc2Binds {
+ return logical.ErrorResponse(fmt.Sprintf("specified bound_account_id but not allowing ec2 auth_type or inferring %s", ec2EntityType)), nil
+ }
+ numBinds++
+ }
+
+ if roleEntry.BoundRegion != "" {
+ if roleEntry.AuthType != ec2AuthType {
+ return logical.ErrorResponse("specified bound_region but not allowing ec2 auth_type"), nil
+ }
+ numBinds++
+ }
+
+ if roleEntry.BoundAmiID != "" {
+ if !allowEc2Binds {
+ return logical.ErrorResponse(fmt.Sprintf("specified bound_ami_id but not allowing ec2 auth_type or inferring %s", ec2EntityType)), nil
+ }
+ numBinds++
+ }
+
+ if roleEntry.BoundIamInstanceProfileARN != "" {
+ if !allowEc2Binds {
+ return logical.ErrorResponse(fmt.Sprintf("specified bound_iam_instance_profile_arn but not allowing ec2 auth_type or inferring %s", ec2EntityType)), nil
+ }
+ numBinds++
+ }
+
+ if roleEntry.BoundIamRoleARN != "" {
+ if !allowEc2Binds {
+ return logical.ErrorResponse(fmt.Sprintf("specified bound_iam_role_arn but not allowing ec2 auth_type or inferring %s", ec2EntityType)), nil
+ }
+ numBinds++
+ }
+
+ if roleEntry.BoundIamPrincipalARN != "" {
+ if roleEntry.AuthType != iamAuthType {
+ return logical.ErrorResponse("specified bound_iam_principal_arn but not allowing iam auth_type"), nil
+ }
+ numBinds++
+ }
+
+ if roleEntry.BoundVpcID != "" {
+ if !allowEc2Binds {
+ return logical.ErrorResponse(fmt.Sprintf("specified bound_vpc_id but not allowing ec2 auth_type or inferring %s", ec2EntityType)), nil
+ }
+ numBinds++
+ }
+
+ if roleEntry.BoundSubnetID != "" {
+ if !allowEc2Binds {
+ return logical.ErrorResponse(fmt.Sprintf("specified bound_subnet_id but not allowing ec2 auth_type or inferring %s", ec2EntityType)), nil
+ }
+ numBinds++
+ }
+
+ if numBinds == 0 {
+ return logical.ErrorResponse("at least be one bound parameter should be specified on the role"), nil
+ }
+
+ policiesStr, ok := data.GetOk("policies")
+ if ok {
+ roleEntry.Policies = policyutil.ParsePolicies(policiesStr.(string))
+ } else if req.Operation == logical.CreateOperation {
+ roleEntry.Policies = []string{"default"}
+ }
+
+ disallowReauthenticationBool, ok := data.GetOk("disallow_reauthentication")
+ if ok {
+ if roleEntry.AuthType != ec2AuthType {
+ return logical.ErrorResponse("specified disallow_reauthentication when not using ec2 auth type"), nil
+ }
+ roleEntry.DisallowReauthentication = disallowReauthenticationBool.(bool)
+ } else if req.Operation == logical.CreateOperation && roleEntry.AuthType == ec2AuthType {
+ roleEntry.DisallowReauthentication = data.Get("disallow_reauthentication").(bool)
+ }
+
+ allowInstanceMigrationBool, ok := data.GetOk("allow_instance_migration")
+ if ok {
+ if roleEntry.AuthType != ec2AuthType {
+ return logical.ErrorResponse("specified allow_instance_migration when not using ec2 auth type"), nil
+ }
+ roleEntry.AllowInstanceMigration = allowInstanceMigrationBool.(bool)
+ } else if req.Operation == logical.CreateOperation && roleEntry.AuthType == ec2AuthType {
+ roleEntry.AllowInstanceMigration = data.Get("allow_instance_migration").(bool)
+ }
+
+ var resp logical.Response
+
+ ttlRaw, ok := data.GetOk("ttl")
+ if ok {
+ ttl := time.Duration(ttlRaw.(int)) * time.Second
+ defaultLeaseTTL := b.System().DefaultLeaseTTL()
+ if ttl > defaultLeaseTTL {
+ resp.AddWarning(fmt.Sprintf("Given ttl of %d seconds greater than current mount/system default of %d seconds; ttl will be capped at login time", ttl/time.Second, defaultLeaseTTL/time.Second))
+ }
+ roleEntry.TTL = ttl
+ } else if req.Operation == logical.CreateOperation {
+ roleEntry.TTL = time.Duration(data.Get("ttl").(int)) * time.Second
+ }
+
+ maxTTLInt, ok := data.GetOk("max_ttl")
+ if ok {
+ maxTTL := time.Duration(maxTTLInt.(int)) * time.Second
+ systemMaxTTL := b.System().MaxLeaseTTL()
+ if maxTTL > systemMaxTTL {
+ resp.AddWarning(fmt.Sprintf("Given max_ttl of %d seconds greater than current mount/system default of %d seconds; max_ttl will be capped at login time", maxTTL/time.Second, systemMaxTTL/time.Second))
+ }
+
+ if maxTTL < time.Duration(0) {
+ return logical.ErrorResponse("max_ttl cannot be negative"), nil
+ }
+
+ roleEntry.MaxTTL = maxTTL
+ } else if req.Operation == logical.CreateOperation {
+ roleEntry.MaxTTL = time.Duration(data.Get("max_ttl").(int)) * time.Second
+ }
+
+ if roleEntry.MaxTTL != 0 && roleEntry.MaxTTL < roleEntry.TTL {
+ return logical.ErrorResponse("ttl should be shorter than max_ttl"), nil
+ }
+
+ periodRaw, ok := data.GetOk("period")
+ if ok {
+ roleEntry.Period = time.Second * time.Duration(periodRaw.(int))
+ } else if req.Operation == logical.CreateOperation {
+ roleEntry.Period = time.Second * time.Duration(data.Get("period").(int))
+ }
+
+ if roleEntry.Period > b.System().MaxLeaseTTL() {
+ return logical.ErrorResponse(fmt.Sprintf("'period' of '%s' is greater than the backend's maximum lease TTL of '%s'", roleEntry.Period.String(), b.System().MaxLeaseTTL().String())), nil
+ }
+
+ roleTagStr, ok := data.GetOk("role_tag")
+ if ok {
+ if roleEntry.AuthType != ec2AuthType {
+ return logical.ErrorResponse("tried to enable role_tag when not using ec2 auth method"), nil
+ }
+ roleEntry.RoleTag = roleTagStr.(string)
+ // There is a limit of 127 characters on the tag key for AWS EC2 instances.
+ // To comply with that requirement, do not allow the value of 'key' to be longer than that.
+ if len(roleEntry.RoleTag) > 127 {
+ return logical.ErrorResponse("length of role tag exceeds the EC2 key limit of 127 characters"), nil
+ }
+ } else if req.Operation == logical.CreateOperation && roleEntry.AuthType == ec2AuthType {
+ roleEntry.RoleTag = data.Get("role_tag").(string)
+ }
+
+ if roleEntry.HMACKey == "" {
+ roleEntry.HMACKey, err = uuid.GenerateUUID()
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate role HMAC key: %v", err)
+ }
+ }
+
+ if err := b.nonLockedSetAWSRole(req.Storage, roleName, roleEntry); err != nil {
+ return nil, err
+ }
+
+ if len(resp.Warnings()) == 0 {
+ return nil, nil
+ }
+
+ return &resp, nil
+}
+
+// Struct to hold the information associated with a role in Vault.
+type awsRoleEntry struct {
+ AuthType string `json:"auth_type" structs:"auth_type" mapstructure:"auth_type"`
+ BoundAmiID string `json:"bound_ami_id" structs:"bound_ami_id" mapstructure:"bound_ami_id"`
+ BoundAccountID string `json:"bound_account_id" structs:"bound_account_id" mapstructure:"bound_account_id"`
+ BoundIamPrincipalARN string `json:"bound_iam_principal_arn" structs:"bound_iam_principal_arn" mapstructure:"bound_iam_principal_arn"`
+ BoundIamRoleARN string `json:"bound_iam_role_arn" structs:"bound_iam_role_arn" mapstructure:"bound_iam_role_arn"`
+ BoundIamInstanceProfileARN string `json:"bound_iam_instance_profile_arn" structs:"bound_iam_instance_profile_arn" mapstructure:"bound_iam_instance_profile_arn"`
+ BoundRegion string `json:"bound_region" structs:"bound_region" mapstructure:"bound_region"`
+ BoundSubnetID string `json:"bound_subnet_id" structs:"bound_subnet_id" mapstructure:"bound_subnet_id"`
+ BoundVpcID string `json:"bound_vpc_id" structs:"bound_vpc_id" mapstructure:"bound_vpc_id"`
+ InferredEntityType string `json:"inferred_entity_type" structs:"inferred_entity_type" mapstructure:"inferred_entity_type"`
+ InferredAWSRegion string `json:"inferred_aws_region" structs:"inferred_aws_region" mapstructure:"inferred_aws_region"`
+ RoleTag string `json:"role_tag" structs:"role_tag" mapstructure:"role_tag"`
+ AllowInstanceMigration bool `json:"allow_instance_migration" structs:"allow_instance_migration" mapstructure:"allow_instance_migration"`
+ TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
+ MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
+ Policies []string `json:"policies" structs:"policies" mapstructure:"policies"`
+ DisallowReauthentication bool `json:"disallow_reauthentication" structs:"disallow_reauthentication" mapstructure:"disallow_reauthentication"`
+ HMACKey string `json:"hmac_key" structs:"hmac_key" mapstructure:"hmac_key"`
+ Period time.Duration `json:"period" mapstructure:"period" structs:"period"`
+}
+
+const pathRoleSyn = `
+Create a role and associate policies to it.
+`
+
+const pathRoleDesc = `
+A precondition for login is that a role should be created in the backend.
+The login endpoint takes in the role name against which the instance
+should be validated. After authenticating the instance, the authorization
+for the instance to access Vault's resources is determined by the policies
+that are associated with the role through this endpoint.
+
+When the instances require only a subset of policies on the role, then
+'role_tag' option on the role can be enabled to create a role tag via the
+endpoint 'role/<role>/tag'. This tag then needs to be applied on the
+instance before it attempts a login. The policies on the tag should be a
+subset of policies that are associated to the role. In order to enable
+login using tags, 'role_tag' option should be set while creating a role.
+This only applies when authenticating EC2 instances.
+
+Also, a 'max_ttl' can be configured in this endpoint that determines the maximum
+duration for which a login can be renewed. Note that the 'max_ttl' has an upper
+limit of the 'max_ttl' value on the backend's mount.
+`
+
+const pathListRolesHelpSyn = `
+Lists all the roles that are registered with Vault.
+`
+
+const pathListRolesHelpDesc = `
+Roles will be listed by their respective role names.
+`
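
A sketch of registering an iam-type role through the vendored vault/api client, assuming a mount at auth/aws; the role name, ARN, and policies are placeholders, and the field names are the ones defined in pathRole above:

    package main

    import (
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        // Field names as defined in pathRole; values are placeholders.
        _, err = client.Logical().Write("auth/aws/role/dev-role", map[string]interface{}{
            "auth_type":               "iam",
            "bound_iam_principal_arn": "arn:aws:iam::123456789012:role/MyRole",
            "policies":                "default,dev",
            "max_ttl":                 "1h",
        })
        if err != nil {
            log.Fatal(err)
        }
    }
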
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go
new file mode 100644
index 0000000..5c8a119
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go
@@ -0,0 +1,432 @@
+package awsauth
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/subtle"
+ "encoding/base64"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const roleTagVersion = "v1"
+
+func pathRoleTag(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "role/" + framework.GenericNameRegex("role") + "/tag$",
+ Fields: map[string]*framework.FieldSchema{
+ "role": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+
+ "instance_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Instance ID for which this tag is intended.
+If set, the created tag can only be used by the instance with the given ID.`,
+ },
+
+ "policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Policies to be associated with the tag. If set, must be a subset of the role's policies. If set, but set to an empty value, only the 'default' policy will be given to issued tokens.",
+ },
+
+ "max_ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 0,
+ Description: "If set, specifies the maximum allowed token lifetime.",
+ },
+
+ "allow_instance_migration": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: "If set, allows migration of the underlying instance where the client resides. This keys off of pendingTime in the metadata document, so essentially, this disables the client nonce check whenever the instance is migrated to a new host and pendingTime is newer than the previously-remembered time. Use with caution.",
+ },
+
+ "disallow_reauthentication": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: "If set, only allows a single token to be granted per instance ID. In order to perform a fresh login, the entry in whitelist for the instance ID needs to be cleared using the 'auth/aws-ec2/identity-whitelist/' endpoint.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoleTagUpdate,
+ },
+
+ HelpSynopsis: pathRoleTagSyn,
+ HelpDescription: pathRoleTagDesc,
+ }
+}
+
+// pathRoleTagUpdate is used to create an EC2 instance tag which will
+// identify the Vault resources that the instance will be authorized for.
+func (b *backend) pathRoleTagUpdate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+
+ roleName := strings.ToLower(data.Get("role").(string))
+ if roleName == "" {
+ return logical.ErrorResponse("missing role"), nil
+ }
+
+ // Fetch the role entry
+ roleEntry, err := b.lockedAWSRole(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if roleEntry == nil {
+ return logical.ErrorResponse(fmt.Sprintf("entry not found for role %s", roleName)), nil
+ }
+
+ // If RoleTag is empty, disallow creation of tag.
+ if roleEntry.RoleTag == "" {
+ return logical.ErrorResponse("tag creation is not enabled for this role"), nil
+ }
+
+ // There should be a HMAC key present in the role entry
+ if roleEntry.HMACKey == "" {
+ // Not being able to find the HMACKey is an internal error
+ return nil, fmt.Errorf("failed to find the HMAC key")
+ }
+
+ resp := &logical.Response{}
+
+ // Instance ID is an optional field.
+ instanceID := strings.ToLower(data.Get("instance_id").(string))
+
+ // If the policies field was not supplied, the tag should inherit all the policies
+ // on the role. But if it was provided and explicitly set to empty, only the "default"
+ // policy should be inherited. So, by leaving the policies var unset when the field is
+ // not supplied, we ensure that it inherits all the policies on the role.
+ var policies []string
+ policiesStr, ok := data.GetOk("policies")
+ if ok {
+ policies = policyutil.ParsePolicies(policiesStr.(string))
+ }
+ if !strutil.StrListSubset(roleEntry.Policies, policies) {
+ resp.AddWarning("Policies on the tag are not a subset of the policies set on the role. Login will not be allowed with this tag unless the role policies are updated.")
+ }
+
+ // This is an optional field.
+ disallowReauthentication := data.Get("disallow_reauthentication").(bool)
+
+ // This is an optional field.
+ allowInstanceMigration := data.Get("allow_instance_migration").(bool)
+ if allowInstanceMigration && !roleEntry.AllowInstanceMigration {
+ resp.AddWarning("Role does not allow instance migration. Login will not be allowed with this tag unless the role value is updated.")
+ }
+
+ // max_ttl for the role tag should be less than the max_ttl set on the role.
+ maxTTL := time.Duration(data.Get("max_ttl").(int)) * time.Second
+
+ // max_ttl on the tag should not be greater than the system view's max_ttl value.
+ if maxTTL > b.System().MaxLeaseTTL() {
+ resp.AddWarning(fmt.Sprintf("Given max TTL of %d is greater than the mount maximum of %d seconds, and will be capped at login time.", maxTTL/time.Second, b.System().MaxLeaseTTL()/time.Second))
+ }
+ // If max_ttl is set for the role, check the bounds for tag's max_ttl value using that.
+ if roleEntry.MaxTTL != time.Duration(0) && maxTTL > roleEntry.MaxTTL {
+ resp.AddWarning(fmt.Sprintf("Given max TTL of %d is greater than the role maximum of %d seconds, and will be capped at login time.", maxTTL/time.Second, roleEntry.MaxTTL/time.Second))
+ }
+
+ if maxTTL < time.Duration(0) {
+ return logical.ErrorResponse("max_ttl cannot be negative"), nil
+ }
+
+ // Create a random nonce.
+ nonce, err := createRoleTagNonce()
+ if err != nil {
+ return nil, err
+ }
+
+ // Create a role tag out of all the information provided.
+ rTagValue, err := createRoleTagValue(&roleTag{
+ Version: roleTagVersion,
+ Role: roleName,
+ Nonce: nonce,
+ Policies: policies,
+ MaxTTL: maxTTL,
+ InstanceID: instanceID,
+ DisallowReauthentication: disallowReauthentication,
+ AllowInstanceMigration: allowInstanceMigration,
+ }, roleEntry)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the key to be used for the tag and the value to be used for that tag key.
+ // This key-value pair should be set on the EC2 instance.
+ resp.Data = map[string]interface{}{
+ "tag_key": roleEntry.RoleTag,
+ "tag_value": rTagValue,
+ }
+
+ return resp, nil
+}
+
+// createRoleTagValue prepares the plaintext version of the role tag
+// and appends an HMAC of the plaintext value to it before returning.
+func createRoleTagValue(rTag *roleTag, roleEntry *awsRoleEntry) (string, error) {
+ if rTag == nil {
+ return "", fmt.Errorf("nil role tag")
+ }
+
+ if roleEntry == nil {
+ return "", fmt.Errorf("nil role entry")
+ }
+
+ // Attach version, nonce, policies and maxTTL to the role tag value.
+ rTagPlaintext, err := prepareRoleTagPlaintextValue(rTag)
+ if err != nil {
+ return "", err
+ }
+
+ // Attach HMAC to tag's plaintext and return.
+ return appendHMAC(rTagPlaintext, roleEntry)
+}
+
+// Takes in the plaintext part of the role tag, creates an HMAC of it and returns
+// a role tag value containing both the plaintext part and the HMAC part.
+func appendHMAC(rTagPlaintext string, roleEntry *awsRoleEntry) (string, error) {
+ if rTagPlaintext == "" {
+ return "", fmt.Errorf("empty role tag plaintext string")
+ }
+
+ if roleEntry == nil {
+ return "", fmt.Errorf("nil role entry")
+ }
+
+ // Create the HMAC of the value
+ hmacB64, err := createRoleTagHMACBase64(roleEntry.HMACKey, rTagPlaintext)
+ if err != nil {
+ return "", err
+ }
+
+ // attach the HMAC to the value
+ rTagValue := fmt.Sprintf("%s:%s", rTagPlaintext, hmacB64)
+
+ // EC2 enforces a limit of 255 characters on tag values. Hence, comply with that here.
+ if len(rTagValue) > 255 {
+ return "", fmt.Errorf("role tag 'value' exceeds the limit of 255 characters")
+ }
+
+ return rTagValue, nil
+}
+
+// verifyRoleTagValue rebuilds the role tag's plaintext part, computes the HMAC
+// from it using the role specific HMAC key and compares it with the received HMAC.
+func verifyRoleTagValue(rTag *roleTag, roleEntry *awsRoleEntry) (bool, error) {
+ if rTag == nil {
+ return false, fmt.Errorf("nil role tag")
+ }
+
+ if roleEntry == nil {
+ return false, fmt.Errorf("nil role entry")
+ }
+
+ // Fetch the plaintext part of role tag
+ rTagPlaintext, err := prepareRoleTagPlaintextValue(rTag)
+ if err != nil {
+ return false, err
+ }
+
+ // Compute the HMAC of the plaintext
+ hmacB64, err := createRoleTagHMACBase64(roleEntry.HMACKey, rTagPlaintext)
+ if err != nil {
+ return false, err
+ }
+
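+ // Compare in constant time so that the comparison itself does not leak the expected HMAC through timing.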
+ return subtle.ConstantTimeCompare([]byte(rTag.HMAC), []byte(hmacB64)) == 1, nil
+}
+
+// prepareRoleTagPlaintextValue builds the role tag value without the HMAC in it.
+func prepareRoleTagPlaintextValue(rTag *roleTag) (string, error) {
+ if rTag == nil {
+ return "", fmt.Errorf("nil role tag")
+ }
+ if rTag.Version == "" {
+ return "", fmt.Errorf("missing version")
+ }
+ if rTag.Nonce == "" {
+ return "", fmt.Errorf("missing nonce")
+ }
+ if rTag.Role == "" {
+ return "", fmt.Errorf("missing role")
+ }
+
+ // Attach Version, Nonce, Role, DisallowReauthentication and AllowInstanceMigration
+ // fields to the role tag.
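+ // e.g. "v1:mNGrAe0q9AU=:r=prod:d=false:m=true" (illustrative values)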
+ value := fmt.Sprintf("%s:%s:r=%s:d=%s:m=%s", rTag.Version, rTag.Nonce, rTag.Role, strconv.FormatBool(rTag.DisallowReauthentication), strconv.FormatBool(rTag.AllowInstanceMigration))
+
+ // Attach the policies only if they are specified.
+ if len(rTag.Policies) != 0 {
+ value = fmt.Sprintf("%s:p=%s", value, strings.Join(rTag.Policies, ","))
+ }
+
+ // Attach instance_id if set.
+ if rTag.InstanceID != "" {
+ value = fmt.Sprintf("%s:i=%s", value, rTag.InstanceID)
+ }
+
+ // Attach max_ttl if it is provided.
+ if int(rTag.MaxTTL.Seconds()) > 0 {
+ value = fmt.Sprintf("%s:t=%d", value, int(rTag.MaxTTL.Seconds()))
+ }
+
+ return value, nil
+}
+
+// parseAndVerifyRoleTagValue parses the tag from string form into struct form
+// and verifies the correctness of the parsed role tag.
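+// A role tag value has the form "<version>:<nonce>:<k=v items>:<base64 HMAC>",
+// where the HMAC is always the last ':'-separated item.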
+func (b *backend) parseAndVerifyRoleTagValue(s logical.Storage, tag string) (*roleTag, error) {
+ tagItems := strings.Split(tag, ":")
+
+ // Tag must at least contain the version, the nonce, the role and the HMAC
+ if len(tagItems) < 4 {
+ return nil, fmt.Errorf("invalid tag")
+ }
+
+ rTag := &roleTag{}
+
+ // Cache the HMAC value. The last item in the collection.
+ rTag.HMAC = tagItems[len(tagItems)-1]
+
+ // Remove the HMAC from the list.
+ tagItems = tagItems[:len(tagItems)-1]
+
+ // Version will be the first element.
+ rTag.Version = tagItems[0]
+ if rTag.Version != roleTagVersion {
+ return nil, fmt.Errorf("invalid role tag version")
+ }
+
+ // Nonce will be the second element.
+ rTag.Nonce = tagItems[1]
+
+ // Delete the version and nonce from the list.
+ tagItems = tagItems[2:]
+
+ for _, tagItem := range tagItems {
+ var err error
+ switch {
+ case strings.Contains(tagItem, "i="):
+ rTag.InstanceID = strings.TrimPrefix(tagItem, "i=")
+ case strings.Contains(tagItem, "r="):
+ rTag.Role = strings.TrimPrefix(tagItem, "r=")
+ case strings.Contains(tagItem, "p="):
+ rTag.Policies = strings.Split(strings.TrimPrefix(tagItem, "p="), ",")
+ case strings.Contains(tagItem, "d="):
+ rTag.DisallowReauthentication, err = strconv.ParseBool(strings.TrimPrefix(tagItem, "d="))
+ if err != nil {
+ return nil, err
+ }
+ case strings.Contains(tagItem, "m="):
+ rTag.AllowInstanceMigration, err = strconv.ParseBool(strings.TrimPrefix(tagItem, "m="))
+ if err != nil {
+ return nil, err
+ }
+ case strings.Contains(tagItem, "t="):
+ rTag.MaxTTL, err = time.ParseDuration(fmt.Sprintf("%ss", strings.TrimPrefix(tagItem, "t=")))
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unrecognized item %s in tag", tagItem)
+ }
+ }
+
+ if rTag.Role == "" {
+ return nil, fmt.Errorf("missing role name")
+ }
+
+ roleEntry, err := b.lockedAWSRole(s, rTag.Role)
+ if err != nil {
+ return nil, err
+ }
+ if roleEntry == nil {
+ return nil, fmt.Errorf("entry not found for %s", rTag.Role)
+ }
+
+ // Create a HMAC of the plaintext value of role tag and compare it with the given value.
+ verified, err := verifyRoleTagValue(rTag, roleEntry)
+ if err != nil {
+ return nil, err
+ }
+ if !verified {
+ return nil, fmt.Errorf("role tag signature verification failed")
+ }
+
+ return rTag, nil
+}
+
+// Creates a base64-encoded HMAC using a per-role key.
+func createRoleTagHMACBase64(key, value string) (string, error) {
+ if key == "" {
+ return "", fmt.Errorf("invalid HMAC key")
+ }
+ hm := hmac.New(sha256.New, []byte(key))
+ hm.Write([]byte(value))
+
+ // base64 encode the hmac bytes.
+ return base64.StdEncoding.EncodeToString(hm.Sum(nil)), nil
+}
+
+// Creates a base64 encoded random nonce.
+func createRoleTagNonce() (string, error) {
+ uuidBytes, err := uuid.GenerateRandomBytes(8)
+ if err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(uuidBytes), nil
+}
+
+// roleTag represents a role tag in struct form.
+type roleTag struct {
+ Version string `json:"version" structs:"version" mapstructure:"version"`
+ InstanceID string `json:"instance_id" structs:"instance_id" mapstructure:"instance_id"`
+ Nonce string `json:"nonce" structs:"nonce" mapstructure:"nonce"`
+ Policies []string `json:"policies" structs:"policies" mapstructure:"policies"`
+ MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
+ Role string `json:"role" structs:"role" mapstructure:"role"`
+ HMAC string `json:"hmac" structs:"hmac" mapstructure:"hmac"`
+ DisallowReauthentication bool `json:"disallow_reauthentication" structs:"disallow_reauthentication" mapstructure:"disallow_reauthentication"`
+ AllowInstanceMigration bool `json:"allow_instance_migration" structs:"allow_instance_migration" mapstructure:"allow_instance_migration"`
+}
+
+func (rTag1 *roleTag) Equal(rTag2 *roleTag) bool {
+ return rTag1 != nil &&
+ rTag2 != nil &&
+ rTag1.Version == rTag2.Version &&
+ rTag1.Nonce == rTag2.Nonce &&
+ policyutil.EquivalentPolicies(rTag1.Policies, rTag2.Policies) &&
+ rTag1.MaxTTL == rTag2.MaxTTL &&
+ rTag1.Role == rTag2.Role &&
+ rTag1.HMAC == rTag2.HMAC &&
+ rTag1.InstanceID == rTag2.InstanceID &&
+ rTag1.DisallowReauthentication == rTag2.DisallowReauthentication &&
+ rTag1.AllowInstanceMigration == rTag2.AllowInstanceMigration
+}
+
+const pathRoleTagSyn = `
+Create a tag on a role in order to be able to further restrict the capabilities of a role.
+`
+
+const pathRoleTagDesc = `
+If there is a need to apply only a subset of a role's capabilities to any specific
+instance, create a role tag using this endpoint and attach the tag to the instance
+before performing login.
+
+To be able to create a role tag, the 'role_tag' option on the role should be
+enabled via the endpoint 'role/<role>'. Also, the policies to be associated
+with the tag should be a subset of the policies associated with the registered role.
+
+This endpoint will return both the 'key' and the 'value' of the tag to be set
+on the EC2 instance.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go
new file mode 100644
index 0000000..52ff435
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go
@@ -0,0 +1,556 @@
+package awsauth
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestBackend_pathRoleEc2(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := map[string]interface{}{
+ "auth_type": "ec2",
+ "policies": "p,q,r,s",
+ "max_ttl": "2h",
+ "bound_ami_id": "ami-abcd123",
+ }
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/ami-abcd123",
+ Data: data,
+ Storage: storage,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create role")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "role/ami-abcd123",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatal("failed to read the role entry")
+ }
+ if !policyutil.EquivalentPolicies(strings.Split(data["policies"].(string), ","), resp.Data["policies"].([]string)) {
+ t.Fatalf("bad: policies: expected: %#v\ngot: %#v\n", data, resp.Data)
+ }
+
+ data["allow_instance_migration"] = true
+ data["disallow_reauthentication"] = true
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "role/ami-abcd123",
+ Data: data,
+ Storage: storage,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create role: %s", resp.Data["error"])
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "role/ami-abcd123",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !resp.Data["allow_instance_migration"].(bool) || !resp.Data["disallow_reauthentication"].(bool) {
+ t.Fatal("bad: expected:true got:false\n")
+ }
+
+ // add another entry, to test listing of role entries
+ data["bound_ami_id"] = "ami-abcd456"
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/ami-abcd456",
+ Data: data,
+ Storage: storage,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create role: %s", resp.Data["error"])
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ListOperation,
+ Path: "roles",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Data == nil || resp.IsError() {
+ t.Fatalf("failed to list the role entries")
+ }
+ keys := resp.Data["keys"].([]string)
+ if len(keys) != 2 {
+ t.Fatalf("bad: keys: %#v\n", keys)
+ }
+
+ _, err = b.HandleRequest(&logical.Request{
+ Operation: logical.DeleteOperation,
+ Path: "role/ami-abcd123",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "role/ami-abcd123",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: response: expected:nil actual:%#v\n", resp)
+ }
+}
+
+func TestBackend_pathIam(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Make sure we start with no roles, which gives us confidence that the
+ // list operation later really returns only the two roles we created.
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.ListOperation,
+ Path: "roles",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Data == nil || resp.IsError() {
+ t.Fatalf("failed to list role entries")
+ }
+ if resp.Data["keys"] != nil {
+ t.Fatalf("Received roles when expected none")
+ }
+
+ data := map[string]interface{}{
+ "auth_type": iamAuthType,
+ "policies": "p,q,r,s",
+ "max_ttl": "2h",
+ "bound_iam_principal_arn": "n:aws:iam::123456789012:user/MyUserName",
+ }
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/MyRoleName",
+ Data: data,
+ Storage: storage,
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create the role entry; resp: %#v", resp)
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "role/MyRoleName",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatal("failed to read the role entry")
+ }
+ if !policyutil.EquivalentPolicies(strings.Split(data["policies"].(string), ","), resp.Data["policies"].([]string)) {
+ t.Fatalf("bad: policies: expected %#v\ngot: %#v\n", data, resp.Data)
+ }
+
+ data["inferred_entity_type"] = "invalid"
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/ShouldNeverExist",
+ Data: data,
+ Storage: storage,
+ })
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("Created role with invalid inferred_entity_type")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data["inferred_entity_type"] = ec2EntityType
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/ShouldNeverExist",
+ Data: data,
+ Storage: storage,
+ })
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("Created role without necessary inferred_aws_region")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ delete(data, "bound_iam_principal_arn")
+ data["inferred_aws_region"] = "us-east-1"
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/ShouldNeverExist",
+ Data: data,
+ Storage: storage,
+ })
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("Created role without anything bound")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // generate a second role, ensure we're able to list both
+ data["bound_ami_id"] = "ami-abcd123"
+ secondRole := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "role/MyOtherRoleName",
+ Data: data,
+ Storage: storage,
+ }
+ resp, err = b.HandleRequest(secondRole)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create additional role: %v", *secondRole)
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ListOperation,
+ Path: "roles",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Data == nil || resp.IsError() {
+ t.Fatalf("failed to list role entries")
+ }
+ keys := resp.Data["keys"].([]string)
+ if len(keys) != 2 {
+ t.Fatalf("bad: keys %#v\n", keys)
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.DeleteOperation,
+ Path: "role/MyOtherRoleName",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "role/MyOtherRoleName",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: response: expected: nil actual:%3v\n", resp)
+ }
+}
+
+func TestBackend_pathRoleMixedTypes(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := map[string]interface{}{
+ "policies": "p,q,r,s",
+ "bound_ami_id": "ami-abc1234",
+ "auth_type": "ec2,invalid",
+ }
+
+ submitRequest := func(roleName string, op logical.Operation) (*logical.Response, error) {
+ return b.HandleRequest(&logical.Request{
+ Operation: op,
+ Path: "role/" + roleName,
+ Data: data,
+ Storage: storage,
+ })
+ }
+
+ resp, err := submitRequest("shouldNeverExist", logical.CreateOperation)
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("created role with invalid auth_type; resp: %#v", resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data["auth_type"] = "ec2,,iam"
+ resp, err = submitRequest("shouldNeverExist", logical.CreateOperation)
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("created role mixed auth types")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data["auth_type"] = ec2AuthType
+ resp, err = submitRequest("ec2_to_iam", logical.CreateOperation)
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create valid role; resp: %#v", resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data["auth_type"] = iamAuthType
+ delete(data, "bound_ami_id")
+ data["bound_iam_principal_arn"] = "arn:aws:iam::123456789012:role/MyRole"
+ resp, err = submitRequest("ec2_to_iam", logical.UpdateOperation)
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("changed auth type on the role")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data["inferred_entity_type"] = ec2EntityType
+ data["inferred_aws_region"] = "us-east-1"
+ resp, err = submitRequest("multipleTypesInferred", logical.CreateOperation)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.IsError() {
+ t.Fatalf("didn't allow creation of roles with only inferred bindings")
+ }
+}
+
+func TestAwsEc2_RoleCrud(t *testing.T) {
+ var err error
+ var resp *logical.Response
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ role1Data := map[string]interface{}{
+ "auth_type": "ec2",
+ "bound_vpc_id": "testvpcid",
+ "allow_instance_migration": true,
+ "policies": "testpolicy1,testpolicy2",
+ }
+ roleReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "role/role1",
+ Data: role1Data,
+ }
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("resp: %#v, err: %v", resp, err)
+ }
+
+ roleData := map[string]interface{}{
+ "auth_type": "ec2",
+ "bound_ami_id": "testamiid",
+ "bound_account_id": "testaccountid",
+ "bound_region": "testregion",
+ "bound_iam_role_arn": "testiamrolearn",
+ "bound_iam_instance_profile_arn": "testiaminstanceprofilearn",
+ "bound_subnet_id": "testsubnetid",
+ "bound_vpc_id": "testvpcid",
+ "role_tag": "testtag",
+ "allow_instance_migration": true,
+ "ttl": "10m",
+ "max_ttl": "20m",
+ "policies": "testpolicy1,testpolicy2",
+ "disallow_reauthentication": true,
+ "hmac_key": "testhmackey",
+ "period": "1m",
+ }
+
+ roleReq.Path = "role/testrole"
+ roleReq.Data = roleData
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("resp: %#v, err: %v", resp, err)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("resp: %#v, err: %v", resp, err)
+ }
+
+ expected := map[string]interface{}{
+ "auth_type": ec2AuthType,
+ "bound_ami_id": "testamiid",
+ "bound_account_id": "testaccountid",
+ "bound_region": "testregion",
+ "bound_iam_principal_arn": "",
+ "bound_iam_role_arn": "testiamrolearn",
+ "bound_iam_instance_profile_arn": "testiaminstanceprofilearn",
+ "bound_subnet_id": "testsubnetid",
+ "bound_vpc_id": "testvpcid",
+ "inferred_entity_type": "",
+ "inferred_aws_region": "",
+ "role_tag": "testtag",
+ "allow_instance_migration": true,
+ "ttl": time.Duration(600),
+ "max_ttl": time.Duration(1200),
+ "policies": []string{"default", "testpolicy1", "testpolicy2"},
+ "disallow_reauthentication": true,
+ "period": time.Duration(60),
+ }
+
+ if !reflect.DeepEqual(expected, resp.Data) {
+ t.Fatalf("bad: role data: expected: %#v\n actual: %#v", expected, resp.Data)
+ }
+
+ roleData["bound_vpc_id"] = "newvpcid"
+ roleReq.Operation = logical.UpdateOperation
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("resp: %#v, err: %v", resp, err)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("resp: %#v, err: %v", resp, err)
+ }
+
+ expected["bound_vpc_id"] = "newvpcid"
+
+ if !reflect.DeepEqual(expected, resp.Data) {
+ t.Fatalf("bad: role data: expected: %#v\n actual: %#v", expected, resp.Data)
+ }
+
+ roleReq.Operation = logical.DeleteOperation
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("resp: %#v, err: %v", resp, err)
+ }
+
+ if resp != nil {
+ t.Fatalf("failed to delete role entry")
+ }
+}
+
+func TestAwsEc2_RoleDurationSeconds(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ roleData := map[string]interface{}{
+ "auth_type": "ec2",
+ "bound_iam_instance_profile_arn": "testarn",
+ "ttl": "10s",
+ "max_ttl": "20s",
+ "period": "30s",
+ }
+
+ roleReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Storage: storage,
+ Path: "role/testrole",
+ Data: roleData,
+ }
+
+ resp, err := b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("resp: %#v, err: %v", resp, err)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("resp: %#v, err: %v", resp, err)
+ }
+
+ if int64(resp.Data["ttl"].(time.Duration)) != 10 {
+ t.Fatalf("bad: period; expected: 10, actual: %d", resp.Data["ttl"])
+ }
+ if int64(resp.Data["max_ttl"].(time.Duration)) != 20 {
+ t.Fatalf("bad: period; expected: 20, actual: %d", resp.Data["max_ttl"])
+ }
+ if int64(resp.Data["period"].(time.Duration)) != 30 {
+ t.Fatalf("bad: period; expected: 30, actual: %d", resp.Data["period"])
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_roletag_blacklist.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_roletag_blacklist.go
new file mode 100644
index 0000000..32fded8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_roletag_blacklist.go
@@ -0,0 +1,257 @@
+package awsauth
+
+import (
+ "encoding/base64"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathRoletagBlacklist(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roletag-blacklist/(?P.*)",
+ Fields: map[string]*framework.FieldSchema{
+ "role_tag": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Role tag to be blacklisted. The tag can be supplied as-is. In order
+to avoid any encoding problems, it can be base64 encoded.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRoletagBlacklistUpdate,
+ logical.ReadOperation: b.pathRoletagBlacklistRead,
+ logical.DeleteOperation: b.pathRoletagBlacklistDelete,
+ },
+
+ HelpSynopsis: pathRoletagBlacklistSyn,
+ HelpDescription: pathRoletagBlacklistDesc,
+ }
+}
+
+// Path to list all the blacklisted tags.
+func pathListRoletagBlacklist(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roletag-blacklist/?",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoletagBlacklistsList,
+ },
+
+ HelpSynopsis: pathListRoletagBlacklistHelpSyn,
+ HelpDescription: pathListRoletagBlacklistHelpDesc,
+ }
+}
+
+// Lists all the blacklisted role tags.
+func (b *backend) pathRoletagBlacklistsList(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.blacklistMutex.RLock()
+ defer b.blacklistMutex.RUnlock()
+
+ tags, err := req.Storage.List("blacklist/roletag/")
+ if err != nil {
+ return nil, err
+ }
+
+ // Tags are base64 encoded before indexing to avoid problems
+ // with path separators being present in the tag.
+ // Decode them before returning the list response.
+ for i, keyB64 := range tags {
+ key, err := base64.StdEncoding.DecodeString(keyB64)
+ if err != nil {
+ return nil, err
+ }
+ // Overwrite the result with the decoded string.
+ tags[i] = string(key)
+ }
+ return logical.ListResponse(tags), nil
+}
+
+// Fetch an entry from the role tag blacklist for a given tag.
+// This method takes a role tag in its original form and not a base64 encoded form.
+func (b *backend) lockedBlacklistRoleTagEntry(s logical.Storage, tag string) (*roleTagBlacklistEntry, error) {
+ b.blacklistMutex.RLock()
+ defer b.blacklistMutex.RUnlock()
+
+ return b.nonLockedBlacklistRoleTagEntry(s, tag)
+}
+
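+// nonLockedBlacklistRoleTagEntry is the lock-free variant of lockedBlacklistRoleTagEntry;
+// callers must hold blacklistMutex.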
+func (b *backend) nonLockedBlacklistRoleTagEntry(s logical.Storage, tag string) (*roleTagBlacklistEntry, error) {
+ entry, err := s.Get("blacklist/roletag/" + base64.StdEncoding.EncodeToString([]byte(tag)))
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result roleTagBlacklistEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+// Deletes an entry from the role tag blacklist for a given tag.
+func (b *backend) pathRoletagBlacklistDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.blacklistMutex.Lock()
+ defer b.blacklistMutex.Unlock()
+
+ tag := data.Get("role_tag").(string)
+ if tag == "" {
+ return logical.ErrorResponse("missing role_tag"), nil
+ }
+
+ return nil, req.Storage.Delete("blacklist/roletag/" + base64.StdEncoding.EncodeToString([]byte(tag)))
+}
+
+// If the given role tag is blacklisted, returns the details of the blacklist entry.
+// Returns 'nil' otherwise.
+func (b *backend) pathRoletagBlacklistRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+
+ tag := data.Get("role_tag").(string)
+ if tag == "" {
+ return logical.ErrorResponse("missing role_tag"), nil
+ }
+
+ entry, err := b.lockedBlacklistRoleTagEntry(req.Storage, tag)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "creation_time": entry.CreationTime.Format(time.RFC3339Nano),
+ "expiration_time": entry.ExpirationTime.Format(time.RFC3339Nano),
+ },
+ }, nil
+}
+
+// pathRoletagBlacklistUpdate is used to blacklist a given role tag.
+// Before a role tag is blacklisted, the correctness of the plaintext part
+// in the role tag is verified using the associated HMAC.
+func (b *backend) pathRoletagBlacklistUpdate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+
+ // The role_tag value provided can optionally be base64 encoded.
+ tagInput := data.Get("role_tag").(string)
+ if tagInput == "" {
+ return logical.ErrorResponse("missing role_tag"), nil
+ }
+
+ tag := ""
+
+ // Try to base64 decode the value.
+ tagBytes, err := base64.StdEncoding.DecodeString(tagInput)
+ if err != nil {
+ // If the decoding failed, use the value as-is.
+ tag = tagInput
+ } else {
+ // If the decoding succeeded, use the decoded value.
+ tag = string(tagBytes)
+ }
+
+ // Parse the role tag from string form into struct form and verify it.
+ rTag, err := b.parseAndVerifyRoleTagValue(req.Storage, tag)
+ if err != nil {
+ return nil, err
+ }
+ if rTag == nil {
+ return logical.ErrorResponse("failed to verify the role tag and parse it"), nil
+ }
+
+ // Get the entry for the role mentioned in the role tag.
+ roleEntry, err := b.lockedAWSRole(req.Storage, rTag.Role)
+ if err != nil {
+ return nil, err
+ }
+ if roleEntry == nil {
+ return logical.ErrorResponse("role entry not found"), nil
+ }
+
+ b.blacklistMutex.Lock()
+ defer b.blacklistMutex.Unlock()
+
+ // Check if the role tag is already blacklisted. If yes, update it.
+ blEntry, err := b.nonLockedBlacklistRoleTagEntry(req.Storage, tag)
+ if err != nil {
+ return nil, err
+ }
+ if blEntry == nil {
+ blEntry = &roleTagBlacklistEntry{}
+ }
+
+ currentTime := time.Now()
+
+ // Check if this is the creation of the blacklist entry.
+ if blEntry.CreationTime.IsZero() {
+ // Set the creation time for the blacklist entry.
+ // This should not be updated after being set once.
+ // If the blacklist operation is invoked more than once, only the expiration time is updated.
+ blEntry.CreationTime = currentTime
+ }
+
+ // Decide the expiration time based on the max_ttl values. Since this is
+ // restricting access, use the greatest duration, not the least.
+ maxDur := rTag.MaxTTL
+ if roleEntry.MaxTTL > maxDur {
+ maxDur = roleEntry.MaxTTL
+ }
+ if b.System().MaxLeaseTTL() > maxDur {
+ maxDur = b.System().MaxLeaseTTL()
+ }
+
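+ // maxDur now holds the greatest of the tag's max_ttl, the role's max_ttl and the mount's maximum lease TTL.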
+ blEntry.ExpirationTime = currentTime.Add(maxDur)
+
+ entry, err := logical.StorageEntryJSON("blacklist/roletag/"+base64.StdEncoding.EncodeToString([]byte(tag)), blEntry)
+ if err != nil {
+ return nil, err
+ }
+
+ // Store the blacklist entry.
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+type roleTagBlacklistEntry struct {
+ CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"`
+ ExpirationTime time.Time `json:"expiration_time" structs:"expiration_time" mapstructure:"expiration_time"`
+}
+
+const pathRoletagBlacklistSyn = `
+Blacklist a previously created role tag.
+`
+
+const pathRoletagBlacklistDesc = `
+Blacklist a role tag so that it cannot be used by any EC2 instance to perform further
+logins. This can be used if the role tag is suspected or believed to be possessed by
+an unintended party.
+
+By default, a periodic task looks for expired entries in the blacklist and
+deletes them. This task runs once an hour by default; the interval can be
+configured using the 'config/tidy/roletag-blacklist' endpoint. The tidy action
+can also be triggered on demand via the API, using the 'tidy/roletag-blacklist' endpoint.
+
+Also note that the delete operation is supported on this endpoint to remove specific
+entries from the blacklist.
+`
+
+const pathListRoletagBlacklistHelpSyn = `
+Lists the blacklisted role tags.
+`
+
+const pathListRoletagBlacklistHelpDesc = `
+Lists all the entries present in the blacklist. This will show both the valid
+entries and the expired entries in the blacklist. Use the 'tidy/roletag-blacklist'
+endpoint to clean up the blacklist of role tags based on expiration time.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_tidy_identity_whitelist.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_tidy_identity_whitelist.go
new file mode 100644
index 0000000..c77687f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_tidy_identity_whitelist.go
@@ -0,0 +1,96 @@
+package awsauth
+
+import (
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathTidyIdentityWhitelist(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "tidy/identity-whitelist$",
+ Fields: map[string]*framework.FieldSchema{
+ "safety_buffer": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 259200,
+ Description: `The amount of extra time that must have passed beyond the identity's
+expiration before it is removed from the backend storage.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathTidyIdentityWhitelistUpdate,
+ },
+
+ HelpSynopsis: pathTidyIdentityWhitelistSyn,
+ HelpDescription: pathTidyIdentityWhitelistDesc,
+ }
+}
+
+// tidyWhitelistIdentity is used to delete entries in the whitelist that are expired.
+func (b *backend) tidyWhitelistIdentity(s logical.Storage, safety_buffer int) error {
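+ // Ensure that only one tidy operation runs at a time; the atomic CAS acts as a non-blocking lock.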
+ grabbed := atomic.CompareAndSwapUint32(&b.tidyWhitelistCASGuard, 0, 1)
+ if grabbed {
+ defer atomic.StoreUint32(&b.tidyWhitelistCASGuard, 0)
+ } else {
+ return fmt.Errorf("identity whitelist tidy operation already running")
+ }
+
+ bufferDuration := time.Duration(safety_buffer) * time.Second
+
+ identities, err := s.List("whitelist/identity/")
+ if err != nil {
+ return err
+ }
+
+ for _, instanceID := range identities {
+ identityEntry, err := s.Get("whitelist/identity/" + instanceID)
+ if err != nil {
+ return fmt.Errorf("error fetching identity of instanceID %s: %s", instanceID, err)
+ }
+
+ if identityEntry == nil {
+ return fmt.Errorf("identity entry for instanceID %s is nil", instanceID)
+ }
+
+ if identityEntry.Value == nil || len(identityEntry.Value) == 0 {
+ return fmt.Errorf("found identity entry for instanceID %s but actual identity is empty", instanceID)
+ }
+
+ var result whitelistIdentity
+ if err := identityEntry.DecodeJSON(&result); err != nil {
+ return err
+ }
+
+ if time.Now().After(result.ExpirationTime.Add(bufferDuration)) {
+ if err := s.Delete("whitelist/identity" + instanceID); err != nil {
+ return fmt.Errorf("error deleting identity of instanceID %s from storage: %s", instanceID, err)
+ }
+ }
+ }
+
+ return nil
+}
+
+// pathTidyIdentityWhitelistUpdate is used to delete entries in the whitelist that are expired.
+func (b *backend) pathTidyIdentityWhitelistUpdate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return nil, b.tidyWhitelistIdentity(req.Storage, data.Get("safety_buffer").(int))
+}
+
+const pathTidyIdentityWhitelistSyn = `
+Clean up the whitelist instance identity entries.
+`
+
+const pathTidyIdentityWhitelistDesc = `
+When an instance identity is whitelisted, the expiration time of the whitelist
+entry is set to the maximum of the 'max_ttl' values set on the role, the role tag
+and the backend's mount.
+
+When this endpoint is invoked, all the entries that are expired will be deleted.
+A 'safety_buffer' (duration in seconds) can be provided to ensure that only
+those entries are deleted which have been expired for at least 'safety_buffer' seconds.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_tidy_roletag_blacklist.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_tidy_roletag_blacklist.go
new file mode 100644
index 0000000..3970a18
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_tidy_roletag_blacklist.go
@@ -0,0 +1,95 @@
+package awsauth
+
+import (
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathTidyRoletagBlacklist(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "tidy/roletag-blacklist$",
+ Fields: map[string]*framework.FieldSchema{
+ "safety_buffer": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 259200, // 72h
+ Description: `The amount of extra time that must have passed beyond the roletag
+expiration before it is removed from the backend storage.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathTidyRoletagBlacklistUpdate,
+ },
+
+ HelpSynopsis: pathTidyRoletagBlacklistSyn,
+ HelpDescription: pathTidyRoletagBlacklistDesc,
+ }
+}
+
+// tidyBlacklistRoleTag is used to clean up the entries in the role tag blacklist.
+func (b *backend) tidyBlacklistRoleTag(s logical.Storage, safety_buffer int) error {
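+ // Guard against concurrent tidy operations: the CAS below succeeds for only one caller at a time.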
+ grabbed := atomic.CompareAndSwapUint32(&b.tidyBlacklistCASGuard, 0, 1)
+ if grabbed {
+ defer atomic.StoreUint32(&b.tidyBlacklistCASGuard, 0)
+ } else {
+ return fmt.Errorf("roletag blacklist tidy operation already running")
+ }
+
+ bufferDuration := time.Duration(safety_buffer) * time.Second
+ tags, err := s.List("blacklist/roletag/")
+ if err != nil {
+ return err
+ }
+
+ for _, tag := range tags {
+ tagEntry, err := s.Get("blacklist/roletag/" + tag)
+ if err != nil {
+ return fmt.Errorf("error fetching tag %s: %s", tag, err)
+ }
+
+ if tagEntry == nil {
+ return fmt.Errorf("tag entry for tag %s is nil", tag)
+ }
+
+ if tagEntry.Value == nil || len(tagEntry.Value) == 0 {
+ return fmt.Errorf("found entry for tag %s but actual tag is empty", tag)
+ }
+
+ var result roleTagBlacklistEntry
+ if err := tagEntry.DecodeJSON(&result); err != nil {
+ return err
+ }
+
+ if time.Now().After(result.ExpirationTime.Add(bufferDuration)) {
+ if err := s.Delete("blacklist/roletag" + tag); err != nil {
+ return fmt.Errorf("error deleting tag %s from storage: %s", tag, err)
+ }
+ }
+ }
+
+ return nil
+}
+
+// pathTidyRoletagBlacklistUpdate is used to clean up the entries in the role tag blacklist.
+func (b *backend) pathTidyRoletagBlacklistUpdate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return nil, b.tidyBlacklistRoleTag(req.Storage, data.Get("safety_buffer").(int))
+}
+
+const pathTidyRoletagBlacklistSyn = `
+Clean-up the blacklist role tag entries.
+`
+
+const pathTidyRoletagBlacklistDesc = `
+When a role tag is blacklisted, the expiration time of the blacklist entry is
+set to the maximum of the 'max_ttl' values set on the role, the role tag and the
+backend's mount.
+
+When this endpoint is invoked, all the entries that are expired will be deleted.
+A 'safety_buffer' (duration in seconds) can be provided to ensure that only
+those entries are deleted which have been expired for at least 'safety_buffer' seconds.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go
new file mode 100644
index 0000000..088cc41
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go
@@ -0,0 +1,75 @@
+package cert
+
+import (
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
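+// Factory returns a new cert auth backend, set up with the given configuration.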
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ b := Backend()
+ _, err := b.Setup(conf)
+ if err != nil {
+ return b, err
+ }
+ return b, nil
+}
+
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: backendHelp,
+
+ PathsSpecial: &logical.Paths{
+ Unauthenticated: []string{
+ "login",
+ },
+ },
+
+ Paths: []*framework.Path{
+ pathConfig(&b),
+ pathLogin(&b),
+ pathListCerts(&b),
+ pathCerts(&b),
+ pathCRLs(&b),
+ },
+
+ AuthRenew: b.pathLoginRenew,
+
+ Invalidate: b.invalidate,
+ }
+
+ b.crlUpdateMutex = &sync.RWMutex{}
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+ MapCertId *framework.PathMap
+
+ crls map[string]CRLInfo
+ crlUpdateMutex *sync.RWMutex
+}
+
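+// invalidate clears the in-memory CRL cache whenever storage under the "crls/" prefix changes.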
+func (b *backend) invalidate(key string) {
+ switch {
+ case strings.HasPrefix(key, "crls/"):
+ b.crlUpdateMutex.Lock()
+ defer b.crlUpdateMutex.Unlock()
+ b.crls = nil
+ }
+}
+
+const backendHelp = `
+The "cert" credential provider allows authentication using
+TLS client certificates. A client connects to Vault and uses
+the "login" endpoint to generate a client token.
+
+Trusted certificates are configured using the "certs/" endpoint
+by a user with root access. A certificate authority can be trusted,
+which permits all keys signed by it. Alternatively, self-signed
+certificates can be trusted, avoiding the need for a CA.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go
new file mode 100644
index 0000000..f96c9cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go
@@ -0,0 +1,857 @@
+package cert
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-rootcerts"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/mitchellh/mapstructure"
+)
+
+const (
+ serverCertPath = "test-fixtures/cacert.pem"
+ serverKeyPath = "test-fixtures/cakey.pem"
+ serverCAPath = serverCertPath
+
+ testRootCACertPath1 = "test-fixtures/testcacert1.pem"
+ testRootCAKeyPath1 = "test-fixtures/testcakey1.pem"
+ testCertPath1 = "test-fixtures/testissuedcert4.pem"
+ testKeyPath1 = "test-fixtures/testissuedkey4.pem"
+ testIssuedCertCRL = "test-fixtures/issuedcertcrl"
+
+ testRootCACertPath2 = "test-fixtures/testcacert2.pem"
+ testRootCAKeyPath2 = "test-fixtures/testcakey2.pem"
+ testRootCertCRL = "test-fixtures/cacert2crl"
+)
+
+// Unlike testConnState, this method does not use the same 'tls.Config' objects
+// for both dialing and listening. Instead, it runs the server without specifying
+// its CA. The client, however, presents the server's CA cert in order to trust
+// the server. The client can present a cert and key that are completely
+// independent of the server's CA.
+// The connection state returned will contain the certificate presented by the client.
+func connectionState(t *testing.T, serverCAPath, serverCertPath, serverKeyPath, clientCertPath, clientKeyPath string) tls.ConnectionState {
+ serverKeyPair, err := tls.LoadX509KeyPair(serverCertPath, serverKeyPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Prepare the listener configuration with server's key pair
+ listenConf := &tls.Config{
+ Certificates: []tls.Certificate{serverKeyPair},
+ ClientAuth: tls.RequestClientCert,
+ }
+
+ clientKeyPair, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Load the CA cert required by the client to authenticate the server.
+ rootConfig := &rootcerts.Config{
+ CAFile: serverCAPath,
+ }
+ serverCAs, err := rootcerts.LoadCACerts(rootConfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Prepare the dial configuration that the client uses to establish the connection.
+ dialConf := &tls.Config{
+ Certificates: []tls.Certificate{clientKeyPair},
+ RootCAs: serverCAs,
+ }
+
+ // Start the server.
+ list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer list.Close()
+
+ // Establish a connection from the client side and write a few bytes.
+ go func() {
+ addr := list.Addr().String()
+ conn, err := tls.Dial("tcp", addr, dialConf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer conn.Close()
+
+ // Write ping
+ conn.Write([]byte("ping"))
+ }()
+
+ // Accept the connection on the server side.
+ serverConn, err := list.Accept()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer serverConn.Close()
+
+ // Read the ping
+ buf := make([]byte, 4)
+ serverConn.Read(buf)
+
+ // Grab the current state
+ connState := serverConn.(*tls.Conn).ConnectionState()
+ return connState
+}
+
+func TestBackend_RegisteredNonCA_CRL(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ nonCACert, err := ioutil.ReadFile(testCertPath1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Register the Non-CA certificate of the client key pair
+ certData := map[string]interface{}{
+ "certificate": nonCACert,
+ "policies": "abc",
+ "display_name": "cert1",
+ "ttl": 10000,
+ }
+ certReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "certs/cert1",
+ Storage: storage,
+ Data: certData,
+ }
+
+ resp, err := b.HandleRequest(certReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Connection state is presenting the client Non-CA cert and its key.
+ // This is exactly what is registered at the backend.
+ connState := connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1)
+ loginReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "login",
+ Connection: &logical.Connection{
+ ConnState: &connState,
+ },
+ }
+ // Login should succeed.
+ resp, err = b.HandleRequest(loginReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Register a CRL containing the issued client certificate used above.
+ issuedCRL, err := ioutil.ReadFile(testIssuedCertCRL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ crlData := map[string]interface{}{
+ "crl": issuedCRL,
+ }
+ crlReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "crls/issuedcrl",
+ Data: crlData,
+ }
+ resp, err = b.HandleRequest(crlReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Attempt login with the same connection state but with the CRL registered
+ resp, err = b.HandleRequest(loginReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("expected failure due to revoked certificate")
+ }
+}
+
+func TestBackend_CRLs(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ clientCA1, err := ioutil.ReadFile(testRootCACertPath1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Register the CA certificate of the client key pair
+ certData := map[string]interface{}{
+ "certificate": clientCA1,
+ "policies": "abc",
+ "display_name": "cert1",
+ "ttl": 10000,
+ }
+
+ certReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "certs/cert1",
+ Storage: storage,
+ Data: certData,
+ }
+
+ resp, err := b.HandleRequest(certReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Connection state is presenting the client CA cert and its key.
+ // This is exactly what is registered at the backend.
+ connState := connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath1, testRootCAKeyPath1)
+ loginReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "login",
+ Connection: &logical.Connection{
+ ConnState: &connState,
+ },
+ }
+ resp, err = b.HandleRequest(loginReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Now, without changing the registered client CA cert, present from
+ // the client side, a cert issued using the registered CA.
+ connState = connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1)
+ loginReq.Connection.ConnState = &connState
+
+ // Attempt login with the updated connection
+ resp, err = b.HandleRequest(loginReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Register a CRL containing the issued client certificate used above.
+ issuedCRL, err := ioutil.ReadFile(testIssuedCertCRL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ crlData := map[string]interface{}{
+ "crl": issuedCRL,
+ }
+
+ crlReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "crls/issuedcrl",
+ Data: crlData,
+ }
+ resp, err = b.HandleRequest(crlReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Attempt login with the revoked certificate.
+ resp, err = b.HandleRequest(loginReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("expected failure due to revoked certificate")
+ }
+
+ // Register a different client CA certificate.
+ clientCA2, err := ioutil.ReadFile(testRootCACertPath2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ certData["certificate"] = clientCA2
+ resp, err = b.HandleRequest(certReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Test login using a different client CA cert pair.
+ connState = connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath2, testRootCAKeyPath2)
+ loginReq.Connection.ConnState = &connState
+
+ // Attempt login with the updated connection
+ resp, err = b.HandleRequest(loginReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Register a CRL containing the root CA certificate used above.
+ rootCRL, err := ioutil.ReadFile(testRootCertCRL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ crlData["crl"] = rootCRL
+ resp, err = b.HandleRequest(crlReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Attempt login with the same connection state but with the CRL registered
+ resp, err = b.HandleRequest(loginReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("expected failure due to revoked certificate")
+ }
+}
+
+func testFactory(t *testing.T) logical.Backend {
+ b, err := Factory(&logical.BackendConfig{
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: 300 * time.Second,
+ MaxLeaseTTLVal: 1800 * time.Second,
+ },
+ StorageView: &logical.InmemStorage{},
+ })
+ if err != nil {
+ t.Fatalf("error: %s", err)
+ }
+ return b
+}
+
+// Test the certificates being registered to the backend
+func TestBackend_CertWrites(t *testing.T) {
+ // CA cert
+ ca1, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ // Non CA Cert
+ ca2, err := ioutil.ReadFile("test-fixtures/keys/cert.pem")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ // Non CA cert without TLS web client authentication
+ ca3, err := ioutil.ReadFile("test-fixtures/noclientauthcert.pem")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ tc := logicaltest.TestCase{
+ Backend: testFactory(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepCert(t, "aaa", ca1, "foo", "", false),
+ testAccStepCert(t, "bbb", ca2, "foo", "", false),
+ testAccStepCert(t, "ccc", ca3, "foo", "", true),
+ },
+ }
+ tc.Steps = append(tc.Steps, testAccStepListCerts(t, []string{"aaa", "bbb"})...)
+ logicaltest.Test(t, tc)
+}
+
+// Test a client trusted by a CA
+func TestBackend_basic_CA(t *testing.T) {
+ connState := testConnState(t, "test-fixtures/keys/cert.pem",
+ "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
+ ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: testFactory(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepCert(t, "web", ca, "foo", "", false),
+ testAccStepLogin(t, connState),
+ testAccStepCertLease(t, "web", ca, "foo"),
+ testAccStepCertTTL(t, "web", ca, "foo"),
+ testAccStepLogin(t, connState),
+ testAccStepCertNoLease(t, "web", ca, "foo"),
+ testAccStepLoginDefaultLease(t, connState),
+ testAccStepCert(t, "web", ca, "foo", "*.example.com", false),
+ testAccStepLogin(t, connState),
+ testAccStepCert(t, "web", ca, "foo", "*.invalid.com", false),
+ testAccStepLoginInvalid(t, connState),
+ },
+ })
+}
+
+// Test CRL behavior
+func TestBackend_Basic_CRLs(t *testing.T) {
+ connState := testConnState(t, "test-fixtures/keys/cert.pem",
+ "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
+ ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ crl, err := ioutil.ReadFile("test-fixtures/root/root.crl")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: testFactory(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepCertNoLease(t, "web", ca, "foo"),
+ testAccStepLoginDefaultLease(t, connState),
+ testAccStepAddCRL(t, crl, connState),
+ testAccStepReadCRL(t, connState),
+ testAccStepLoginInvalid(t, connState),
+ testAccStepDeleteCRL(t, connState),
+ testAccStepLoginDefaultLease(t, connState),
+ },
+ })
+}
+
+// Test a self-signed client (root CA) that is trusted
+func TestBackend_basic_singleCert(t *testing.T) {
+ connState := testConnState(t, "test-fixtures/root/rootcacert.pem",
+ "test-fixtures/root/rootcakey.pem", "test-fixtures/root/rootcacert.pem")
+ ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: testFactory(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepCert(t, "web", ca, "foo", "", false),
+ testAccStepLogin(t, connState),
+ testAccStepCert(t, "web", ca, "foo", "example.com", false),
+ testAccStepLogin(t, connState),
+ testAccStepCert(t, "web", ca, "foo", "invalid", false),
+ testAccStepLoginInvalid(t, connState),
+ },
+ })
+}
+
+// Test against a collection of matching and non-matching rules
+func TestBackend_mixed_constraints(t *testing.T) {
+ connState := testConnState(t, "test-fixtures/keys/cert.pem",
+ "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
+ ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: testFactory(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepCert(t, "1unconstrained", ca, "foo", "", false),
+ testAccStepCert(t, "2matching", ca, "foo", "*.example.com,whatever", false),
+ testAccStepCert(t, "3invalid", ca, "foo", "invalid", false),
+ testAccStepLogin(t, connState),
+ // Assumes CertEntries are processed in alphabetical order (due to store.List), so we only match 2matching if 1unconstrained doesn't match
+ testAccStepLoginWithName(t, connState, "2matching"),
+ testAccStepLoginWithNameInvalid(t, connState, "3invalid"),
+ },
+ })
+}
+
+// Test an untrusted client
+func TestBackend_untrusted(t *testing.T) {
+ connState := testConnState(t, "test-fixtures/keys/cert.pem",
+ "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: testFactory(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepLoginInvalid(t, connState),
+ },
+ })
+}
+
+func testAccStepAddCRL(t *testing.T, crl []byte, connState tls.ConnectionState) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "crls/test",
+ ConnState: &connState,
+ Data: map[string]interface{}{
+ "crl": crl,
+ },
+ }
+}
+
+func testAccStepReadCRL(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "crls/test",
+ ConnState: &connState,
+ Check: func(resp *logical.Response) error {
+ crlInfo := CRLInfo{}
+ err := mapstructure.Decode(resp.Data, &crlInfo)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(crlInfo.Serials) != 1 {
+ t.Fatalf("bad: expected CRL with length 1, got %d", len(crlInfo.Serials))
+ }
+ if _, ok := crlInfo.Serials["637101449987587619778072672905061040630001617053"]; !ok {
+ t.Fatalf("bad: expected serial number not found in CRL")
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepDeleteCRL(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "crls/test",
+ ConnState: &connState,
+ }
+}
+
+func testAccStepLogin(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
+ return testAccStepLoginWithName(t, connState, "")
+}
+
+func testAccStepLoginWithName(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Unauthenticated: true,
+ ConnState: &connState,
+ Check: func(resp *logical.Response) error {
+ if resp.Auth.TTL != 1000*time.Second {
+ t.Fatalf("bad lease length: %#v", resp.Auth)
+ }
+
+ if certName != "" && resp.Auth.DisplayName != ("mnt-"+certName) {
+ t.Fatalf("matched the wrong cert: %#v", resp.Auth.DisplayName)
+ }
+
+ fn := logicaltest.TestCheckAuth([]string{"default", "foo"})
+ return fn(resp)
+ },
+ Data: map[string]interface{}{
+ "name": certName,
+ },
+ }
+}
+
+func testAccStepLoginDefaultLease(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Unauthenticated: true,
+ ConnState: &connState,
+ Check: func(resp *logical.Response) error {
+ if resp.Auth.TTL != 300*time.Second {
+ t.Fatalf("bad lease length: %#v", resp.Auth)
+ }
+
+ fn := logicaltest.TestCheckAuth([]string{"default", "foo"})
+ return fn(resp)
+ },
+ }
+}
+
+func testAccStepLoginInvalid(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
+ return testAccStepLoginWithNameInvalid(t, connState, "")
+}
+
+func testAccStepLoginWithNameInvalid(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Unauthenticated: true,
+ ConnState: &connState,
+ Check: func(resp *logical.Response) error {
+ if resp.Auth != nil {
+ return fmt.Errorf("should not be authorized: %#v", resp)
+ }
+ return nil
+ },
+ Data: map[string]interface{}{
+ "name": certName,
+ },
+ ErrorOk: true,
+ }
+}
+
+func testAccStepListCerts(
+ t *testing.T, certs []string) []logicaltest.TestStep {
+ return []logicaltest.TestStep{
+ logicaltest.TestStep{
+ Operation: logical.ListOperation,
+ Path: "certs",
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ return fmt.Errorf("nil response")
+ }
+ if resp.Data == nil {
+ return fmt.Errorf("nil data")
+ }
+ if resp.Data["keys"] == interface{}(nil) {
+ return fmt.Errorf("nil keys")
+ }
+ keys := resp.Data["keys"].([]string)
+ if !reflect.DeepEqual(keys, certs) {
+ return fmt.Errorf("mismatch: keys is %#v, certs is %#v", keys, certs)
+ }
+ return nil
+ },
+ }, logicaltest.TestStep{
+ Operation: logical.ListOperation,
+ Path: "certs/",
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ return fmt.Errorf("nil response")
+ }
+ if resp.Data == nil {
+ return fmt.Errorf("nil data")
+ }
+ if resp.Data["keys"] == interface{}(nil) {
+ return fmt.Errorf("nil keys")
+ }
+ keys := resp.Data["keys"].([]string)
+ if !reflect.DeepEqual(keys, certs) {
+ return fmt.Errorf("mismatch: keys is %#v, certs is %#v", keys, certs)
+ }
+
+ return nil
+ },
+ },
+ }
+}
+
+func testAccStepCert(
+ t *testing.T, name string, cert []byte, policies string, allowedNames string, expectError bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "certs/" + name,
+ ErrorOk: expectError,
+ Data: map[string]interface{}{
+ "certificate": string(cert),
+ "policies": policies,
+ "display_name": name,
+ "allowed_names": allowedNames,
+ "lease": 1000,
+ },
+ Check: func(resp *logical.Response) error {
+ if resp == nil && expectError {
+ return fmt.Errorf("expected error but received nil")
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepCertLease(
+ t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "certs/" + name,
+ Data: map[string]interface{}{
+ "certificate": string(cert),
+ "policies": policies,
+ "display_name": name,
+ "lease": 1000,
+ },
+ }
+}
+
+func testAccStepCertTTL(
+ t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "certs/" + name,
+ Data: map[string]interface{}{
+ "certificate": string(cert),
+ "policies": policies,
+ "display_name": name,
+ "ttl": "1000s",
+ },
+ }
+}
+
+func testAccStepCertNoLease(
+ t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "certs/" + name,
+ Data: map[string]interface{}{
+ "certificate": string(cert),
+ "policies": policies,
+ "display_name": name,
+ },
+ }
+}
+
+func testConnState(t *testing.T, certPath, keyPath, rootCertPath string) tls.ConnectionState {
+ cert, err := tls.LoadX509KeyPair(certPath, keyPath)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ rootConfig := &rootcerts.Config{
+ CAFile: rootCertPath,
+ }
+ rootCAs, err := rootcerts.LoadCACerts(rootConfig)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ listenConf := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ ClientAuth: tls.RequestClientCert,
+ InsecureSkipVerify: false,
+ RootCAs: rootCAs,
+ }
+ dialConf := new(tls.Config)
+ *dialConf = *listenConf
+ list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer list.Close()
+
+ go func() {
+ addr := list.Addr().String()
+ conn, err := tls.Dial("tcp", addr, dialConf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer conn.Close()
+
+ // Write ping
+ conn.Write([]byte("ping"))
+ }()
+
+ serverConn, err := list.Accept()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer serverConn.Close()
+
+	// Read the ping
+ buf := make([]byte, 4)
+ serverConn.Read(buf)
+
+ // Grab the current state
+ connState := serverConn.(*tls.Conn).ConnectionState()
+ return connState
+}
+
+func Test_Renew(t *testing.T) {
+ storage := &logical.InmemStorage{}
+
+ lb, err := Factory(&logical.BackendConfig{
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: 300 * time.Second,
+ MaxLeaseTTLVal: 1800 * time.Second,
+ },
+ StorageView: storage,
+ })
+ if err != nil {
+ t.Fatalf("error: %s", err)
+ }
+
+ b := lb.(*backend)
+ connState := testConnState(t, "test-fixtures/keys/cert.pem",
+ "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
+ ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := &logical.Request{
+ Connection: &logical.Connection{
+ ConnState: &connState,
+ },
+ Storage: storage,
+ Auth: &logical.Auth{},
+ }
+
+ fd := &framework.FieldData{
+ Raw: map[string]interface{}{
+ "name": "test",
+ "certificate": ca,
+ "policies": "foo,bar",
+ },
+ Schema: pathCerts(b).Fields,
+ }
+
+ resp, err := b.pathCertWrite(req, fd)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	emptyLoginFD := &framework.FieldData{
+ Raw: map[string]interface{}{},
+ Schema: pathLogin(b).Fields,
+ }
+	resp, err = b.pathLogin(req, emptyLoginFD)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.IsError() {
+ t.Fatalf("got error: %#v", *resp)
+ }
+ req.Auth.InternalData = resp.Auth.InternalData
+ req.Auth.Metadata = resp.Auth.Metadata
+ req.Auth.LeaseOptions = resp.Auth.LeaseOptions
+ req.Auth.Policies = resp.Auth.Policies
+ req.Auth.IssueTime = time.Now()
+
+ // Normal renewal
+	resp, err = b.pathLoginRenew(req, emptyLoginFD)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("got nil response from renew")
+ }
+ if resp.IsError() {
+ t.Fatalf("got error: %#v", *resp)
+ }
+
+ // Change the policies -- this should fail
+ fd.Raw["policies"] = "zip,zap"
+ resp, err = b.pathCertWrite(req, fd)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	resp, err = b.pathLoginRenew(req, emptyLoginFD)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+	// Put the policies back; this should be okay
+ fd.Raw["policies"] = "bar,foo"
+ resp, err = b.pathCertWrite(req, fd)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	resp, err = b.pathLoginRenew(req, emptyLoginFD)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("got nil response from renew")
+ }
+ if resp.IsError() {
+ t.Fatalf("got error: %#v", *resp)
+ }
+
+ // Delete CA, make sure we can't renew
+ resp, err = b.pathCertDelete(req, fd)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	resp, err = b.pathLoginRenew(req, emptyLoginFD)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("got nil response from renew")
+ }
+ if !resp.IsError() {
+ t.Fatal("expected error")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go
new file mode 100644
index 0000000..66809c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go
@@ -0,0 +1,56 @@
+package cert
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/mitchellh/mapstructure"
+)
+
+type CLIHandler struct{}
+
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+ var data struct {
+ Mount string `mapstructure:"mount"`
+ Name string `mapstructure:"name"`
+ }
+ if err := mapstructure.WeakDecode(m, &data); err != nil {
+ return "", err
+ }
+
+ if data.Mount == "" {
+ data.Mount = "cert"
+ }
+
+ options := map[string]interface{}{
+ "name": data.Name,
+ }
+ path := fmt.Sprintf("auth/%s/login", data.Mount)
+ secret, err := c.Logical().Write(path, options)
+ if err != nil {
+ return "", err
+ }
+ if secret == nil {
+ return "", fmt.Errorf("empty response from credential provider")
+ }
+
+ return secret.Auth.ClientToken, nil
+}
+
+func (h *CLIHandler) Help() string {
+ help := `
+The "cert" credential provider allows you to authenticate with a
+client certificate. No other authentication materials are needed.
+Optionally, you may specify the specific certificate role to
+authenticate against with the "name" parameter.
+
+ Example: vault auth -method=cert \
+ -client-cert=/path/to/cert.pem \
+ -client-key=/path/to/key.pem \
+ name=cert1
+
+ `
+
+ return strings.TrimSpace(help)
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go
new file mode 100644
index 0000000..2c002f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go
@@ -0,0 +1,226 @@
+package cert
+
+import (
+ "crypto/x509"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathListCerts(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "certs/?",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathCertList,
+ },
+
+ HelpSynopsis: pathCertHelpSyn,
+ HelpDescription: pathCertHelpDesc,
+ }
+}
+
+func pathCerts(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "certs/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The name of the certificate",
+ },
+
+ "certificate": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The public certificate that should be trusted.
+Must be x509 PEM encoded.`,
+ },
+
+ "allowed_names": &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: `A comma-separated list of names.
+At least one must exist in either the Common Name or SANs. Supports globbing.`,
+ },
+
+ "display_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The display name to use for clients using this
+certificate.`,
+ },
+
+ "policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+				Description: "Comma-separated list of policies.",
+ },
+
+ "lease": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `Deprecated: use "ttl" instead. TTL time in
+seconds. Defaults to system/backend default TTL.`,
+ },
+
+ "ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: `TTL for tokens issued by this backend.
+Defaults to system/backend default TTL time.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.DeleteOperation: b.pathCertDelete,
+ logical.ReadOperation: b.pathCertRead,
+ logical.UpdateOperation: b.pathCertWrite,
+ },
+
+ HelpSynopsis: pathCertHelpSyn,
+ HelpDescription: pathCertHelpDesc,
+ }
+}
+
+func (b *backend) Cert(s logical.Storage, n string) (*CertEntry, error) {
+ entry, err := s.Get("cert/" + strings.ToLower(n))
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result CertEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+func (b *backend) pathCertDelete(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("cert/" + strings.ToLower(d.Get("name").(string)))
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func (b *backend) pathCertList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ certs, err := req.Storage.List("cert/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(certs), nil
+}
+
+func (b *backend) pathCertRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ cert, err := b.Cert(req.Storage, strings.ToLower(d.Get("name").(string)))
+ if err != nil {
+ return nil, err
+ }
+ if cert == nil {
+ return nil, nil
+ }
+
+ duration := cert.TTL
+ if duration == 0 {
+ duration = b.System().DefaultLeaseTTL()
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "certificate": cert.Certificate,
+ "display_name": cert.DisplayName,
+ "policies": strings.Join(cert.Policies, ","),
+ "ttl": duration / time.Second,
+ },
+ }, nil
+}
+
+func (b *backend) pathCertWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := strings.ToLower(d.Get("name").(string))
+ certificate := d.Get("certificate").(string)
+ displayName := d.Get("display_name").(string)
+ policies := policyutil.ParsePolicies(d.Get("policies").(string))
+ allowedNames := d.Get("allowed_names").([]string)
+
+ // Default the display name to the certificate name if not given
+ if displayName == "" {
+ displayName = name
+ }
+
+ parsed := parsePEM([]byte(certificate))
+ if len(parsed) == 0 {
+ return logical.ErrorResponse("failed to parse certificate"), nil
+ }
+
+ // If the certificate is not a CA cert, then ensure that x509.ExtKeyUsageClientAuth is set
+ if !parsed[0].IsCA && parsed[0].ExtKeyUsage != nil {
+ var clientAuth bool
+ for _, usage := range parsed[0].ExtKeyUsage {
+ if usage == x509.ExtKeyUsageClientAuth || usage == x509.ExtKeyUsageAny {
+ clientAuth = true
+ break
+ }
+ }
+ if !clientAuth {
+ return logical.ErrorResponse("non-CA certificates should have TLS client authentication set as an extended key usage"), nil
+ }
+ }
+
+ certEntry := &CertEntry{
+ Name: name,
+ Certificate: certificate,
+ DisplayName: displayName,
+ Policies: policies,
+ AllowedNames: allowedNames,
+ }
+
+ // Parse the lease duration or default to backend/system default
+ maxTTL := b.System().MaxLeaseTTL()
+ ttl := time.Duration(d.Get("ttl").(int)) * time.Second
+ if ttl == time.Duration(0) {
+ ttl = time.Second * time.Duration(d.Get("lease").(int))
+ }
+ if ttl > maxTTL {
+		return logical.ErrorResponse(fmt.Sprintf("Given TTL of %d seconds is greater than the current mount/system maximum of %d seconds", ttl/time.Second, maxTTL/time.Second)), nil
+ }
+ if ttl > time.Duration(0) {
+ certEntry.TTL = ttl
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("cert/"+name, certEntry)
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+type CertEntry struct {
+ Name string
+ Certificate string
+ DisplayName string
+ Policies []string
+ TTL time.Duration
+ AllowedNames []string
+}
+
+const pathCertHelpSyn = `
+Manage trusted certificates used for authentication.
+`
+
+const pathCertHelpDesc = `
+This endpoint allows you to create, read, update, and delete trusted certificates
+that are allowed to authenticate.
+
+Deleting a certificate will not revoke auth for prior authenticated connections.
+To do this, do a revoke on "login". If you don't need to revoke login immediately,
+then the next renew will cause the lease to expire.
+`
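+
+// Editor's illustrative sketch (not part of the vendored source): with the
+// backend mounted at the default "cert" path, a CA certificate could be
+// registered against this endpoint via the Vault CLI roughly as below; the
+// file name, policy names, and TTL are assumptions for the example:
+//
+//   vault write auth/cert/certs/web \
+//       display_name=web \
+//       policies=web,prod \
+//       certificate=@ca.pem \
+//       ttl=3600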
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_config.go
new file mode 100644
index 0000000..9e946c6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_config.go
@@ -0,0 +1,63 @@
+package cert
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfig(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config",
+ Fields: map[string]*framework.FieldSchema{
+ "disable_binding": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: `If set, during renewal, skips the matching of presented client identity with the client identity used during login. Defaults to false.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathConfigWrite,
+ },
+ }
+}
+
+func (b *backend) pathConfigWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ disableBinding := data.Get("disable_binding").(bool)
+
+ entry, err := logical.StorageEntryJSON("config", config{
+ DisableBinding: disableBinding,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+// Config returns the configuration for this backend.
+func (b *backend) Config(s logical.Storage) (*config, error) {
+ entry, err := s.Get("config")
+ if err != nil {
+ return nil, err
+ }
+
+	// Return a default configuration if an entry is not found
+ var result config
+ if entry != nil {
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, fmt.Errorf("error reading configuration: %s", err)
+ }
+ }
+ return &result, nil
+}
+
+type config struct {
+ DisableBinding bool `json:"disable_binding"`
+}
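+
+// Editor's illustrative sketch (not part of the vendored source): the
+// login/renewal identity binding enforced in pathLoginRenew can be relaxed
+// through this endpoint, e.g. (mount path assumed to be the default "cert"):
+//
+//   vault write auth/cert/config disable_binding=true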
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_crls.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_crls.go
new file mode 100644
index 0000000..234b93a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_crls.go
@@ -0,0 +1,253 @@
+package cert
+
+import (
+ "crypto/x509"
+ "fmt"
+ "math/big"
+ "strings"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathCRLs(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "crls/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+				Description: "The name of the CRL",
+ },
+
+ "crl": &framework.FieldSchema{
+ Type: framework.TypeString,
+				Description: `The public CRL that should be trusted.
+May be DER or PEM encoded. Note: the expiration time
+is ignored; if the CRL is no longer valid, delete it
+using the same name as specified here.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.DeleteOperation: b.pathCRLDelete,
+ logical.ReadOperation: b.pathCRLRead,
+ logical.UpdateOperation: b.pathCRLWrite,
+ },
+
+ HelpSynopsis: pathCRLsHelpSyn,
+ HelpDescription: pathCRLsHelpDesc,
+ }
+}
+
+func (b *backend) populateCRLs(storage logical.Storage) error {
+ b.crlUpdateMutex.Lock()
+ defer b.crlUpdateMutex.Unlock()
+
+ if b.crls != nil {
+ return nil
+ }
+
+ b.crls = map[string]CRLInfo{}
+
+ keys, err := storage.List("crls/")
+ if err != nil {
+ return fmt.Errorf("error listing CRLs: %v", err)
+ }
+	if len(keys) == 0 {
+ return nil
+ }
+
+ for _, key := range keys {
+ entry, err := storage.Get("crls/" + key)
+ if err != nil {
+ b.crls = nil
+ return fmt.Errorf("error loading CRL %s: %v", key, err)
+ }
+ if entry == nil {
+ continue
+ }
+ var crlInfo CRLInfo
+ err = entry.DecodeJSON(&crlInfo)
+ if err != nil {
+ b.crls = nil
+ return fmt.Errorf("error decoding CRL %s: %v", key, err)
+ }
+ b.crls[key] = crlInfo
+ }
+
+ return nil
+}
+
+func (b *backend) findSerialInCRLs(serial *big.Int) map[string]RevokedSerialInfo {
+ b.crlUpdateMutex.RLock()
+ defer b.crlUpdateMutex.RUnlock()
+ ret := map[string]RevokedSerialInfo{}
+ for key, crl := range b.crls {
+ if crl.Serials == nil {
+ continue
+ }
+ if info, ok := crl.Serials[serial.String()]; ok {
+ ret[key] = info
+ }
+ }
+ return ret
+}
+
+func parseSerialString(input string) (*big.Int, error) {
+ ret := &big.Int{}
+
+ switch {
+ case strings.Count(input, ":") > 0:
+ serialBytes := certutil.ParseHexFormatted(input, ":")
+ if serialBytes == nil {
+ return nil, fmt.Errorf("error parsing serial %s", input)
+ }
+ ret.SetBytes(serialBytes)
+ case strings.Count(input, "-") > 0:
+ serialBytes := certutil.ParseHexFormatted(input, "-")
+ if serialBytes == nil {
+ return nil, fmt.Errorf("error parsing serial %s", input)
+ }
+ ret.SetBytes(serialBytes)
+ default:
+ var success bool
+ ret, success = ret.SetString(input, 0)
+ if !success {
+ return nil, fmt.Errorf("error parsing serial %s", input)
+ }
+ }
+
+ return ret, nil
+}
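+
+// Editor's illustrative sketch (not part of the vendored source): the three
+// branches above accept colon-separated hex, hyphen-separated hex, and any
+// base accepted by big.Int.SetString with base 0 (decimal, 0x-prefixed hex,
+// and so on), so all of these calls parse to the same serial:
+//
+//   parseSerialString("7f:e8:e1:29")
+//   parseSerialString("7f-e8-e1-29")
+//   parseSerialString("0x7fe8e129")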
+
+func (b *backend) pathCRLDelete(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := strings.ToLower(d.Get("name").(string))
+ if name == "" {
+ return logical.ErrorResponse(`"name" parameter cannot be empty`), nil
+ }
+
+ if err := b.populateCRLs(req.Storage); err != nil {
+ return nil, err
+ }
+
+ b.crlUpdateMutex.Lock()
+ defer b.crlUpdateMutex.Unlock()
+
+ _, ok := b.crls[name]
+ if !ok {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "no such CRL %s", name,
+ )), nil
+ }
+
+ if err := req.Storage.Delete("crls/" + name); err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "error deleting crl %s: %v", name, err),
+ ), nil
+ }
+
+ delete(b.crls, name)
+
+ return nil, nil
+}
+
+func (b *backend) pathCRLRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := strings.ToLower(d.Get("name").(string))
+ if name == "" {
+ return logical.ErrorResponse(`"name" parameter must be set`), nil
+ }
+
+ if err := b.populateCRLs(req.Storage); err != nil {
+ return nil, err
+ }
+
+ b.crlUpdateMutex.RLock()
+ defer b.crlUpdateMutex.RUnlock()
+
+ var retData map[string]interface{}
+
+ crl, ok := b.crls[name]
+ if !ok {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "no such CRL %s", name,
+ )), nil
+ }
+
+ retData = structs.New(&crl).Map()
+
+ return &logical.Response{
+ Data: retData,
+ }, nil
+}
+
+func (b *backend) pathCRLWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := strings.ToLower(d.Get("name").(string))
+ if name == "" {
+ return logical.ErrorResponse(`"name" parameter cannot be empty`), nil
+ }
+ crl := d.Get("crl").(string)
+
+ certList, err := x509.ParseCRL([]byte(crl))
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("failed to parse CRL: %v", err)), nil
+ }
+ if certList == nil {
+ return logical.ErrorResponse("parsed CRL is nil"), nil
+ }
+
+ if err := b.populateCRLs(req.Storage); err != nil {
+ return nil, err
+ }
+
+ b.crlUpdateMutex.Lock()
+ defer b.crlUpdateMutex.Unlock()
+
+ crlInfo := CRLInfo{
+ Serials: map[string]RevokedSerialInfo{},
+ }
+ for _, revokedCert := range certList.TBSCertList.RevokedCertificates {
+ crlInfo.Serials[revokedCert.SerialNumber.String()] = RevokedSerialInfo{}
+ }
+
+ entry, err := logical.StorageEntryJSON("crls/"+name, crlInfo)
+ if err != nil {
+ return nil, err
+ }
+ if err = req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ b.crls[name] = crlInfo
+
+ return nil, nil
+}
+
+type CRLInfo struct {
+ Serials map[string]RevokedSerialInfo `json:"serials" structs:"serials" mapstructure:"serials"`
+}
+
+type RevokedSerialInfo struct {
+}
+
+const pathCRLsHelpSyn = `
+Manage Certificate Revocation Lists checked during authentication.
+`
+
+const pathCRLsHelpDesc = `
+This endpoint allows you to create, read, update, and delete the Certificate
+Revocation Lists checked during authentication.
+
+When any CRLs are in effect, any login will check the trust chains sent by a
+client against the submitted CRLs. Any chain containing a serial number revoked
+by one or more of the CRLs causes that chain to be marked as invalid for the
+authentication attempt. Conversely, *any* valid chain -- that is, a chain
+in which none of the serials are revoked by any CRL -- allows authentication.
+This allows authentication to succeed when interim parts of one chain have been
+revoked; for instance, if a certificate is signed by two intermediate CAs due to
+one of them expiring.
+`
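+
+// Editor's illustrative sketch (not part of the vendored source): a CRL in
+// PEM form could be registered against this endpoint with the Vault CLI,
+// assuming the default "cert" mount and a local crl.pem file:
+//
+//   vault write auth/cert/crls/intermediate crl=@crl.pem
+//
+// Deleting "auth/cert/crls/intermediate" later removes the revocation checks
+// that the CRL introduced.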
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go
new file mode 100644
index 0000000..164bbe7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go
@@ -0,0 +1,356 @@
+package cert
+
+import (
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+
+ "github.com/ryanuber/go-glob"
+)
+
+// ParsedCert is a certificate that has been configured as trusted
+type ParsedCert struct {
+ Entry *CertEntry
+ Certificates []*x509.Certificate
+}
+
+func pathLogin(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "login",
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The name of the certificate role to authenticate against.",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathLogin,
+ },
+ }
+}
+
+func (b *backend) pathLogin(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+
+ var matched *ParsedCert
+ if verifyResp, resp, err := b.verifyCredentials(req, data); err != nil {
+ return nil, err
+ } else if resp != nil {
+ return resp, nil
+ } else {
+ matched = verifyResp
+ }
+
+ if matched == nil {
+ return nil, nil
+ }
+
+ ttl := matched.Entry.TTL
+ if ttl == 0 {
+ ttl = b.System().DefaultLeaseTTL()
+ }
+
+ clientCerts := req.Connection.ConnState.PeerCertificates
+ if len(clientCerts) == 0 {
+ return logical.ErrorResponse("no client certificate found"), nil
+ }
+ skid := base64.StdEncoding.EncodeToString(clientCerts[0].SubjectKeyId)
+ akid := base64.StdEncoding.EncodeToString(clientCerts[0].AuthorityKeyId)
+
+ // Generate a response
+ resp := &logical.Response{
+ Auth: &logical.Auth{
+ InternalData: map[string]interface{}{
+ "subject_key_id": skid,
+ "authority_key_id": akid,
+ },
+ Policies: matched.Entry.Policies,
+ DisplayName: matched.Entry.DisplayName,
+ Metadata: map[string]string{
+ "cert_name": matched.Entry.Name,
+ "common_name": clientCerts[0].Subject.CommonName,
+ "subject_key_id": certutil.GetHexFormatted(clientCerts[0].SubjectKeyId, ":"),
+ "authority_key_id": certutil.GetHexFormatted(clientCerts[0].AuthorityKeyId, ":"),
+ },
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ TTL: ttl,
+ },
+ },
+ }
+ return resp, nil
+}
+
+func (b *backend) pathLoginRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ config, err := b.Config(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ if !config.DisableBinding {
+ var matched *ParsedCert
+ if verifyResp, resp, err := b.verifyCredentials(req, d); err != nil {
+ return nil, err
+ } else if resp != nil {
+ return resp, nil
+ } else {
+ matched = verifyResp
+ }
+
+ if matched == nil {
+ return nil, nil
+ }
+
+ clientCerts := req.Connection.ConnState.PeerCertificates
+ if len(clientCerts) == 0 {
+ return nil, fmt.Errorf("no client certificate found")
+ }
+ skid := base64.StdEncoding.EncodeToString(clientCerts[0].SubjectKeyId)
+ akid := base64.StdEncoding.EncodeToString(clientCerts[0].AuthorityKeyId)
+
+		// The certificate must not only match a registered certificate entry;
+		// the identity presented during renewal must also line up with the one
+		// recorded at login: renewal is rejected when neither the subject key ID
+		// nor the authority key ID matches the values stored at login time.
+ if req.Auth.InternalData["subject_key_id"] != skid && req.Auth.InternalData["authority_key_id"] != akid {
+ return nil, fmt.Errorf("client identity during renewal not matching client identity used during login")
+ }
+
+ }
+ // Get the cert and use its TTL
+ cert, err := b.Cert(req.Storage, req.Auth.Metadata["cert_name"])
+ if err != nil {
+ return nil, err
+ }
+ if cert == nil {
+ // User no longer exists, do not renew
+ return nil, nil
+ }
+
+ if !policyutil.EquivalentPolicies(cert.Policies, req.Auth.Policies) {
+ return nil, fmt.Errorf("policies have changed, not renewing")
+ }
+
+ return framework.LeaseExtend(cert.TTL, 0, b.System())(req, d)
+}
+
+func (b *backend) verifyCredentials(req *logical.Request, d *framework.FieldData) (*ParsedCert, *logical.Response, error) {
+ // Get the connection state
+ if req.Connection == nil || req.Connection.ConnState == nil {
+ return nil, logical.ErrorResponse("tls connection required"), nil
+ }
+ connState := req.Connection.ConnState
+
+	if len(connState.PeerCertificates) == 0 {
+ return nil, logical.ErrorResponse("client certificate must be supplied"), nil
+ }
+ clientCert := connState.PeerCertificates[0]
+
+ // Allow constraining the login request to a single CertEntry
+ certName := d.Get("name").(string)
+
+ // Load the trusted certificates
+ roots, trusted, trustedNonCAs := b.loadTrustedCerts(req.Storage, certName)
+
+	// If trustedNonCAs is not empty, the client has registered one or more
+	// non-CA certs directly with the backend.
+ if len(trustedNonCAs) != 0 {
+ for _, trustedNonCA := range trustedNonCAs {
+ tCert := trustedNonCA.Certificates[0]
+ // Check for client cert being explicitly listed in the config (and matching other constraints)
+ if tCert.SerialNumber.Cmp(clientCert.SerialNumber) == 0 &&
+ bytes.Equal(tCert.AuthorityKeyId, clientCert.AuthorityKeyId) &&
+ b.matchesConstraints(clientCert, trustedNonCA.Certificates, trustedNonCA) {
+ return trustedNonCA, nil, nil
+ }
+ }
+ }
+
+ // Get the list of full chains matching the connection
+ trustedChains, err := validateConnState(roots, connState)
+ if err != nil {
+ return nil, nil, err
+ }
+
+	// If no trusted chain was found, the client is not authenticated
+ if len(trustedChains) == 0 {
+ return nil, logical.ErrorResponse("invalid certificate or no client certificate supplied"), nil
+ }
+
+ // Search for a ParsedCert that intersects with the validated chains and any additional constraints
+ matches := make([]*ParsedCert, 0)
+ for _, trust := range trusted { // For each ParsedCert in the config
+ for _, tCert := range trust.Certificates { // For each certificate in the entry
+ for _, chain := range trustedChains { // For each root chain that we matched
+ for _, cCert := range chain { // For each cert in the matched chain
+ if tCert.Equal(cCert) && // ParsedCert intersects with matched chain
+ b.matchesConstraints(clientCert, chain, trust) { // validate client cert + matched chain against the config
+ // Add the match to the list
+ matches = append(matches, trust)
+ }
+ }
+ }
+ }
+ }
+
+ // Fail on no matches
+ if len(matches) == 0 {
+ return nil, logical.ErrorResponse("no chain matching all constraints could be found for this login certificate"), nil
+ }
+
+ // Return the first matching entry (for backwards compatibility, we continue to just pick one if multiple match)
+ return matches[0], nil, nil
+}
+
+func (b *backend) matchesConstraints(clientCert *x509.Certificate, trustedChain []*x509.Certificate, config *ParsedCert) bool {
+ // Default behavior (no names) is to allow all names
+ nameMatched := len(config.Entry.AllowedNames) == 0
+ // At least one pattern must match at least one name if any patterns are specified
+ for _, allowedName := range config.Entry.AllowedNames {
+ if glob.Glob(allowedName, clientCert.Subject.CommonName) {
+ nameMatched = true
+ }
+
+ for _, name := range clientCert.DNSNames {
+ if glob.Glob(allowedName, name) {
+ nameMatched = true
+ }
+ }
+
+ for _, name := range clientCert.EmailAddresses {
+ if glob.Glob(allowedName, name) {
+ nameMatched = true
+ }
+ }
+ }
+
+ return !b.checkForChainInCRLs(trustedChain) && nameMatched
+}
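+
+// Editor's note (not part of the vendored source): the name matching above
+// uses simple "*" wildcard globbing from github.com/ryanuber/go-glob, e.g.:
+//
+//   glob.Glob("*.example.com", "cert.example.com") // true
+//   glob.Glob("*.example.com", "example.com")      // false: no leading dot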
+
+// loadTrustedCerts is used to load all the trusted certificates from the backend
+func (b *backend) loadTrustedCerts(store logical.Storage, certName string) (pool *x509.CertPool, trusted []*ParsedCert, trustedNonCAs []*ParsedCert) {
+ pool = x509.NewCertPool()
+ trusted = make([]*ParsedCert, 0)
+ trustedNonCAs = make([]*ParsedCert, 0)
+ names, err := store.List("cert/")
+ if err != nil {
+ b.Logger().Error("cert: failed to list trusted certs", "error", err)
+ return
+ }
+ for _, name := range names {
+ // If we are trying to select a single CertEntry and this isn't it
+ if certName != "" && name != certName {
+ continue
+ }
+ entry, err := b.Cert(store, strings.TrimPrefix(name, "cert/"))
+ if err != nil {
+ b.Logger().Error("cert: failed to load trusted cert", "name", name, "error", err)
+ continue
+ }
+ parsed := parsePEM([]byte(entry.Certificate))
+ if len(parsed) == 0 {
+ b.Logger().Error("cert: failed to parse certificate", "name", name)
+ continue
+ }
+ if !parsed[0].IsCA {
+ trustedNonCAs = append(trustedNonCAs, &ParsedCert{
+ Entry: entry,
+ Certificates: parsed,
+ })
+ } else {
+ for _, p := range parsed {
+ pool.AddCert(p)
+ }
+
+ // Create a ParsedCert entry
+ trusted = append(trusted, &ParsedCert{
+ Entry: entry,
+ Certificates: parsed,
+ })
+ }
+ }
+ return
+}
+
+func (b *backend) checkForChainInCRLs(chain []*x509.Certificate) bool {
+ badChain := false
+ for _, cert := range chain {
+ badCRLs := b.findSerialInCRLs(cert.SerialNumber)
+ if len(badCRLs) != 0 {
+ badChain = true
+ break
+ }
+ }
+ return badChain
+}
+
+func (b *backend) checkForValidChain(chains [][]*x509.Certificate) bool {
+ for _, chain := range chains {
+ if !b.checkForChainInCRLs(chain) {
+ return true
+ }
+ }
+ return false
+}
+
+// parsePEM parses a PEM encoded x509 certificate
+func parsePEM(raw []byte) (certs []*x509.Certificate) {
+ for len(raw) > 0 {
+ var block *pem.Block
+ block, raw = pem.Decode(raw)
+ if block == nil {
+ break
+ }
+ if (block.Type != "CERTIFICATE" && block.Type != "TRUSTED CERTIFICATE") || len(block.Headers) != 0 {
+ continue
+ }
+
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ continue
+ }
+ certs = append(certs, cert)
+ }
+ return
+}
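+
+// Editor's illustrative sketch (not part of the vendored source): given a
+// PEM bundle holding a certificate chain, parsePEM returns every CERTIFICATE
+// block in order, silently skipping non-certificate blocks and undecodable
+// entries:
+//
+//   chain := parsePEM(pemBytes) // pemBytes: raw contents of a .pem bundle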
+
+// validateConnState is used to validate that the TLS client is authorized
+// by a trusted certificate. Most of this logic is lifted from the client
+// verification logic here: http://golang.org/src/crypto/tls/handshake_server.go
+// The trusted chains are returned.
+func validateConnState(roots *x509.CertPool, cs *tls.ConnectionState) ([][]*x509.Certificate, error) {
+ opts := x509.VerifyOptions{
+ Roots: roots,
+ Intermediates: x509.NewCertPool(),
+ KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+ }
+
+ certs := cs.PeerCertificates
+ if len(certs) == 0 {
+ return nil, nil
+ }
+
+ if len(certs) > 1 {
+ for _, cert := range certs[1:] {
+ opts.Intermediates.AddCert(cert)
+ }
+ }
+
+ chains, err := certs[0].Verify(opts)
+ if err != nil {
+ if _, ok := err.(x509.UnknownAuthorityError); ok {
+ return nil, nil
+ }
+ return nil, errors.New("failed to verify client's certificate: " + err.Error())
+ }
+ return chains, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cacert.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cacert.pem
new file mode 100644
index 0000000..9d9a385
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cacert.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDPjCCAiagAwIBAgIUXiEDuecwua9+j1XHLnconxQ/JBcwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwMzU4WhgPMjA2
+NjA0MjAxNjA0MjhaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwWPjnTqnkc6acah+wWLmdTK0oCrf2687XVhx
+VP3IN897TYzkaBQ2Dn1UM2VEL71sE3OZSVm0UWs5n7UqRuDp6mvkvrT2q5zgh/bV
+zg9ZL1AI5H7dY2Rsor95I849ymFpXZooMgNtIQLxIeleBwzTnVSkFl8RqKM7NkjZ
+wvBafQEjSsYk9050Bu0GMLgFJYRo1LozJLbwIs5ykG5F5PWTMfRvLCgLBzixPb75
+unIJ29nL0yB7zzUdkM8CG1EX8NkjGLEnpRnPa7+RMf8bd10v84cr0JFCUQmoabks
+sqVyA825/1we2r5Y8blyXZVIr2lcPyGocLDxz1qT1MqxrNQIywIDAQABo4GBMH8w
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBTo2I+W
+3Wb2MBe3OWuj5qCbafavMB8GA1UdIwQYMBaAFBTo2I+W3Wb2MBe3OWuj5qCbafav
+MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB
+AQAyjJzDMzf28yMgiu//2R6LD3+zuLHlfX8+p5JB7WDBT7CgSm89gzMRtD2DvqZQ
+6iLbZv/x7Td8bdLsOKf3LDCkZyOygJ0Sr9+6YZdc9heWO8tsO/SbcLhj9/vK8YyV
+5fJo+vECW8I5zQLeTKfPqJtTU0zFspv0WYCB96Hsbhd1hTfHmVgjBoxi0YuduAa8
+3EHuYPfTYkO3M4QJCoQ+3S6LXSTDqppd1KGAy7QhRU6shd29EpSVxhgqZ+CIOpZu
+3RgPOgPqfqcOD/v/SRPqhRf+P5O5Dc/N4ZXTZtfJbaY0qE+smpeQUskVQ2TrSqha
+UYpNk7+toZW3Gioo0lBD3gH2
+-----END CERTIFICATE-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cacert2crl b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cacert2crl
new file mode 100644
index 0000000..82db7a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cacert2crl
@@ -0,0 +1,12 @@
+-----BEGIN X509 CRL-----
+MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtteXZhdWx0LmNvbRcN
+MTYwNTAyMTYxNDMzWhcNMTYwNTA1MTYxNDMzWjArMCkCFCXxxcbS0ATpI2PYrx8d
+ACLEQ3B9FxExNjA1MDIxMjE0MzMtMDQwMKAjMCEwHwYDVR0jBBgwFoAUwsRNYCw4
+U2won66rMKEJm8inFfgwDQYJKoZIhvcNAQELBQADggEBAD/VvoRK4eaEDzG7Z95b
+fHL5ubJGkyvkp8ruNu+rfQp8NLgFVvY6a93Hz7WLOhACkKIWJ63+/4vCfDi5uU0B
+HW2FICHdlSQ+6DdGJ6MrgujALlyT+69iF+fPiJ/M1j/N7Am8XPYYcfNdSK6CHtfg
+gHNB7E+ubBA7lIw7ucIkoiJjXrSWSXTs9/GzLUImiXJAKQ+JzPYryIsGKXKAwgHh
+HB56BnJ2vOs7+6UxQ6fjKTMxYdNgoZ34MhkkxNNhylrEndO6XUvUvC1f/1p1wlzy
+xTq2MrMfJHJyu08rkrD+kwMPH2uoVwKyDhXdRBP0QrvQwOsvNEhW8LTKwLWkK17b
+fEI=
+-----END X509 CRL-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cakey.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cakey.pem
new file mode 100644
index 0000000..ecba475
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cakey.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAwWPjnTqnkc6acah+wWLmdTK0oCrf2687XVhxVP3IN897TYzk
+aBQ2Dn1UM2VEL71sE3OZSVm0UWs5n7UqRuDp6mvkvrT2q5zgh/bVzg9ZL1AI5H7d
+Y2Rsor95I849ymFpXZooMgNtIQLxIeleBwzTnVSkFl8RqKM7NkjZwvBafQEjSsYk
+9050Bu0GMLgFJYRo1LozJLbwIs5ykG5F5PWTMfRvLCgLBzixPb75unIJ29nL0yB7
+zzUdkM8CG1EX8NkjGLEnpRnPa7+RMf8bd10v84cr0JFCUQmoabkssqVyA825/1we
+2r5Y8blyXZVIr2lcPyGocLDxz1qT1MqxrNQIywIDAQABAoIBAD1pBd9ov8t6Surq
+sY2hZUM0Hc16r+ln5LcInbx6djjaxvHiWql+OYgyXimP764lPYuTuspjFPKB1SOU
++N7XDxCkwFeayXXHdDlYtZ4gm5Z9mMVOT+j++8xWdxZaqJ56fmX9zOPM2LuR3paB
+L52Xgh9EwHJmMApYAzaCvbu8bU+iHeNTW80xabxQrp9VCu/A1BXUX06jK4T+wmjZ
+kDA82uQp3dCOF1tv/10HgwqkJj6/1jjM0XUzUZR6iV85S6jrA7wD7gDDeqNO8YHN
+08YMRgTKk4pbA7AqoC5xbL3gbSjsjyw48KRq0FkdkjsgV0PJZRMUU9fv9puDa23K
+WRPa8LECgYEAyeth5bVH8FXnVXIAAFU6W0WdgCK3VakhjItLw0eoxshuTwbVq64w
+CNOB8y1pfP83WiJjX3qRG43NDW07X69J57YKtCCb6KICVUPmecgYZPkmegD1HBQZ
+5+Aak+5pIUQuycQ0t65yHGu4Jsju05gEFgdzydFjNANgiPxRzZxzAkkCgYEA9S+y
+ZR063oCQDg/GhMLCx19nCJyU44Figh1YCD6kTrsSTECuRpQ5B1F9a+LeZT2wnYxv
++qMvvV+lfVY73f5WZ567u2jSDIsCH34p4g7sE25lKwo+Lhik6EtOehJFs2ZUemaT
+Ym7EjqWlC1whrG7P4MnTGzPOVNAGAxsGPtT58nMCgYAs/R8A2VU//UPfy9ioOlUY
+RPiEtjd3BIoPEHI+/lZihAHf5bvx1oupS8bmcbXRPeQNVyAhA+QU6ZFIbpAOD7Y9
+xFe6LpHOUVqHuOs/MxAMX17tTA1QxkHHYi1JzJLr8I8kMW01h86w+mc7bQWZa4Nt
+jReFXfvmeOInY2CumS8e0QKBgC23ow/vj1aFqla04lNG7YK3a0LTz39MVM3mItAG
+viRgBV1qghRu9uNCcpx3RPijtBbsZMTbQL+S4gyo06jlD79qfZ7IQMJN+SteHvkj
+xykoYHzSAB4gQj9+KzffyFdXMVFRZxHnjYb7o/amSzEXyHMlrtNXqZVu5HAXzeZR
+V/m5AoGAAStS43Q7qSJSMfMBITKMdKlqCObnifD77WeR2WHGrpkq26300ggsDpMS
+UTmnAAo77lSMmDsdoNn2XZmdeTu1CPoQnoZSE5CqPd5GeHA/hhegVCdeYxSXZJoH
+Lhiac+AhCEog/MS1GmVsjynD7eDGVFcsJ6SWuam7doKfrpPqPnE=
+-----END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/generate.txt b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/generate.txt
new file mode 100644
index 0000000..5b888ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/generate.txt
@@ -0,0 +1,67 @@
+vault mount pki
+vault mount-tune -max-lease-ttl=438000h pki
+vault write pki/root/generate/exported common_name=myvault.com ttl=438000h ip_sans=127.0.0.1
+vi cacert.pem
+vi cakey.pem
+
+vaultcert.hcl
+backend "inmem" {
+}
+disable_mlock = true
+default_lease_ttl = "700h"
+max_lease_ttl = "768h"
+listener "tcp" {
+ address = "127.0.0.1:8200"
+ tls_cert_file = "./cacert.pem"
+ tls_key_file = "./cakey.pem"
+}
+========================================
+vault mount pki
+vault mount-tune -max-lease-ttl=438000h pki
+vault write pki/root/generate/exported common_name=myvault.com ttl=438000h max_ttl=438000h ip_sans=127.0.0.1
+vi testcacert1.pem
+vi testcakey1.pem
+vi testcaserial1
+
+vault write pki/config/urls issuing_certificates="http://127.0.0.1:8200/v1/pki/ca" crl_distribution_points="http://127.0.0.1:8200/v1/pki/crl"
+vault write pki/roles/myvault-dot-com allowed_domains=myvault.com allow_subdomains=true ttl=437999h max_ttl=438000h allow_ip_sans=true
+
+vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
+vi testissuedserial1
+
+vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
+vi testissuedcert2.pem
+vi testissuedkey2.pem
+vi testissuedserial2
+
+vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
+vi testissuedserial3
+
+vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
+vi testissuedcert4.pem
+vi testissuedkey4.pem
+vi testissuedserial4
+
+vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
+vi testissuedserial5
+
+vault write pki/revoke serial_number=$(cat testissuedserial2)
+vault write pki/revoke serial_number=$(cat testissuedserial4)
+curl -XGET "http://127.0.0.1:8200/v1/pki/crl/pem" -H "x-vault-token:123" > issuedcertcrl
+openssl crl -in issuedcertcrl -noout -text
+
+========================================
+export VAULT_ADDR='http://127.0.0.1:8200'
+vault mount pki
+vault mount-tune -max-lease-ttl=438000h pki
+vault write pki/root/generate/exported common_name=myvault.com ttl=438000h ip_sans=127.0.0.1
+vi testcacert2.pem
+vi testcakey2.pem
+vi testcaserial2
+vi testcacert2leaseid
+
+vault write pki/config/urls issuing_certificates="http://127.0.0.1:8200/v1/pki/ca" crl_distribution_points="http://127.0.0.1:8200/v1/pki/crl"
+vault revoke $(cat testcacert2leaseid)
+
+curl -XGET "http://127.0.0.1:8200/v1/pki/crl/pem" -H "x-vault-token:123" > cacert2crl
+openssl crl -in cacert2crl -noout -text
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/issuedcertcrl b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/issuedcertcrl
new file mode 100644
index 0000000..45e9a98
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/issuedcertcrl
@@ -0,0 +1,12 @@
+-----BEGIN X509 CRL-----
+MIIB2TCBwjANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtteXZhdWx0LmNvbRcN
+MTYwNTAyMTYxMTA4WhcNMTYwNTA1MTYxMTA4WjBWMCkCFAS6oenLRllQ1MRYcSV+
+5ukv2563FxExNjA1MDIxMjExMDgtMDQwMDApAhQaQdPJfbIwE3q4nyYp60lVnZaE
+5hcRMTYwNTAyMTIxMTA1LTA0MDCgIzAhMB8GA1UdIwQYMBaAFOuKvPiUG06iHkRX
+AOeMiUdBfHFyMA0GCSqGSIb3DQEBCwUAA4IBAQBD2jkeOAmkDdYkAXbmjLGdHaQI
+WMS/M+wtFnHVIDVQEmUmj/KPsrkshTZv2UgCHIxBha6y+kXUMQFMg6FwriDTB170
+WyJVDVhGg2WjiQjnzrzEI+iOmcpx60sPPXE63J/Zxo4QS5M62RTXRq3909HQTFI5
+f3xf0pog8mOrv5uQxO1SACP6YFtdDE2dGOVwoIPuNMTY5vijnj8I9dAw8VrbdoBX
+m/Ky56kT+BpmVWHKwQd1nEcP/RHSKbZwwJzJG0BoGM8cvzjITtBmpEF+OZcea81x
+p9XJkpfFeiVIgzxks3zTeuQjLF8u+MDcdGt0ztHEbkswjxuk1cCovZe2GFr4
+-----END X509 CRL-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/cert.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/cert.pem
new file mode 100644
index 0000000..942d266
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/cert.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw
+MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq
+hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS
+TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn
+SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi
+YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5
+donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG
+B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1
+MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e
+HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o
+k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x
+OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A
+AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br
+aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs
+X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4
+aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA
+KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN
+QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj
+xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk=
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/key.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/key.pem
new file mode 100644
index 0000000..add9820
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu
+HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA
+6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N
+TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd
+y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2
+DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX
+9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF
+RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd
+rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI
+5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7
+oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ
+GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb
+VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR
+akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI
+FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy
+efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh
+r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ
+0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp
+FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR
+kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT
+UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3
+xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W
+injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU
+2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3
+gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4=
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/pkioutput b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/pkioutput
new file mode 100644
index 0000000..526ff03
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/pkioutput
@@ -0,0 +1,74 @@
+Key Value
+lease_id pki/issue/example-dot-com/d8214077-9976-8c68-9c07-6610da30aea4
+lease_duration 279359999
+lease_renewable false
+certificate -----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw
+MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq
+hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS
+TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn
+SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi
+YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5
+donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG
+B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1
+MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e
+HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o
+k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x
+OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A
+AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br
+aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs
+X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4
+aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA
+KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN
+QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj
+xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk=
+-----END CERTIFICATE-----
+issuing_ca -----BEGIN CERTIFICATE-----
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
+MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
+Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
+AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
+6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
+SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
+7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
+BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
+ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
+t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
+zehNe5dFTjFpylg1o6b8Ow==
+-----END CERTIFICATE-----
+private_key -----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu
+HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA
+6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N
+TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd
+y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2
+DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX
+9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF
+RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd
+rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI
+5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7
+oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ
+GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb
+VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR
+akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI
+FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy
+efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh
+r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ
+0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp
+FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR
+kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT
+UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3
+xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W
+injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU
+2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3
+gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4=
+-----END RSA PRIVATE KEY-----
+private_key_type rsa
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/noclientauthcert.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/noclientauthcert.pem
new file mode 100644
index 0000000..3948f22
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/noclientauthcert.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDGTCCAgGgAwIBAgIBBDANBgkqhkiG9w0BAQUFADBxMQowCAYDVQQDFAEqMQsw
+CQYDVQQIEwJHQTELMAkGA1UEBhMCVVMxJTAjBgkqhkiG9w0BCQEWFnZpc2hhbG5h
+eWFrdkBnbWFpbC5jb20xEjAQBgNVBAoTCUhhc2hpQ29ycDEOMAwGA1UECxMFVmF1
+bHQwHhcNMTYwMjI5MjE0NjE2WhcNMjEwMjI3MjE0NjE2WjBxMQowCAYDVQQDFAEq
+MQswCQYDVQQIEwJHQTELMAkGA1UEBhMCVVMxJTAjBgkqhkiG9w0BCQEWFnZpc2hh
+bG5heWFrdkBnbWFpbC5jb20xEjAQBgNVBAoTCUhhc2hpQ29ycDEOMAwGA1UECxMF
+VmF1bHQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMfRkLfIGHt1r2jjnV0N
+LqRCu3oB+J1dqpM03vQt3qzIiqtuQuIA2ba7TJm2HwU3W3+rtfFcS+hkBR/LZM+u
+cBPB+9b9+7i08vHjgy2P3QH/Ebxa8j1v7JtRMT2qyxWK8NlT/+wZSH82Cr812aS/
+zNT56FbBo2UAtzpqeC4eiv6NAgMBAAGjQDA+MAkGA1UdEwQCMAAwCwYDVR0PBAQD
+AgXgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZI
+hvcNAQEFBQADggEBAG2mUwsZ6+R8qqyNjzMk7mgpsRZv9TEl6c1IiQdyjaCOPaYH
+vtZpLX20um36cxrLuOUtZLllG/VJEhRZW5mXWxuOk4QunWMBXQioCDJG1ktcZAcQ
+QqYv9Dzy2G9lZHjLztEac37T75RXW7OEeQREgwP11c8sQYiS9jf+7ITYL7nXjoKq
+gEuH0h86BOH2O/BxgMelt9O0YCkvkLLHnE27xuNelRRZcBLSuE1GxdUi32MDJ+ff
+25GUNM0zzOEaJAFE/USUBEdQqN1gvJidNXkAiMtIK7T8omQZONRaD2ZnSW8y2krh
+eUg+rKis9RinqFlahLPfI5BlyQsNMEnsD07Q85E=
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/pkioutput b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/pkioutput
new file mode 100644
index 0000000..312ae18
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/pkioutput
@@ -0,0 +1,74 @@
+Key Value
+lease_id pki/root/generate/exported/7bf99d76-dd3e-2c5b-04ce-5253062ad586
+lease_duration 315359999
+lease_renewable false
+certificate -----BEGIN CERTIFICATE-----
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
+MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
+Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
+AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
+6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
+SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
+7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
+BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
+ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
+t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
+zehNe5dFTjFpylg1o6b8Ow==
+-----END CERTIFICATE-----
+expiration 1.772072879e+09
+issuing_ca -----BEGIN CERTIFICATE-----
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
+MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
+Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
+AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
+6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
+SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
+7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
+BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
+ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
+t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
+zehNe5dFTjFpylg1o6b8Ow==
+-----END CERTIFICATE-----
+private_key -----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p
+t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3
+BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w
+/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv
+0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi
+18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb
+ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn
+8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f
+nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8
+2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t
+grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc
+bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9
+0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN
+ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf
+lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1
+lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj
+AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG
+ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib
+thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU
+4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb
+iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO
+tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y
+LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc
+4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX
+OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8=
+-----END RSA PRIVATE KEY-----
+private_key_type rsa
+serial_number 6f:98:9d:f8:67:1a:31:e3:27:60:1b:f7:32:f7:53:19:68:a0:c8:9d
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/root.crl b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/root.crl
new file mode 100644
index 0000000..a80c9e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/root.crl
@@ -0,0 +1,12 @@
+-----BEGIN X509 CRL-----
+MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbRcN
+MTYwMjI5MDIyOTE3WhcNMjUwMTA1MTAyOTE3WjArMCkCFG+YnfhnGjHjJ2Ab9zL3
+UxlooMidFxExNjAyMjgyMTI5MTctMDUwMKAjMCEwHwYDVR0jBBgwFoAUncSzT/6H
+MexyuiU9/7EgHu+ok5swDQYJKoZIhvcNAQELBQADggEBAG9YDXpNe4LJroKZmVCn
+HqMhW8eyzyaPak2nPPGCVUnc6vt8rlBYQU+xlBizD6xatZQDMPgrT8sBl9W3ysXk
+RUlliHsT/SHddMz5dAZsBPRMJ7pYWLTx8jI4w2WRfbSyI4bY/6qTRNkEBUv+Fk8J
+xvwB89+EM0ENcVMhv9ghsUA8h7kOg673HKwRstLDAzxS/uLmEzFjj8SV2m5DbV2Y
+UUCKRSV20/kxJMIC9x2KikZhwOSyv1UE1otD+RQvbfAoZPUDmvp2FR/E0NGjBBOg
+1TtCPRrl63cjqU3s8KQ4uah9Vj+Cwcu9n/yIKKtNQq4NKHvagv8GlUsoJ4BdAxCw
+IA0=
+-----END X509 CRL-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/rootcacert.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/rootcacert.pem
new file mode 100644
index 0000000..dcb307a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/rootcacert.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
+MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
+Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
+AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
+6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
+SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
+7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
+BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
+ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
+t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
+zehNe5dFTjFpylg1o6b8Ow==
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/rootcakey.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/rootcakey.pem
new file mode 100644
index 0000000..e950da5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/rootcakey.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p
+t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3
+BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w
+/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv
+0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi
+18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb
+ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn
+8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f
+nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8
+2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t
+grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc
+bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9
+0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN
+ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf
+lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1
+lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj
+AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG
+ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib
+thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU
+4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb
+iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO
+tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y
+LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc
+4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX
+OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8=
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcacert1.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcacert1.pem
new file mode 100644
index 0000000..ab8bf9e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcacert1.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDPjCCAiagAwIBAgIUfIKsF2VPT7sdFcKOHJH2Ii6K4MwwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwNTQyWhgPMjA2
+NjA0MjAxNjA2MTJaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdS
+xz9hfymuJb+cN8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP
+67HDzVZhGBHlHTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xE
+JsHQPYS9ASe2eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUb
+cCcIZyk4QVFZ1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SY
+WrCONRw61A5Zwx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABo4GBMH8w
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOuKvPiU
+G06iHkRXAOeMiUdBfHFyMB8GA1UdIwQYMBaAFOuKvPiUG06iHkRXAOeMiUdBfHFy
+MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB
+AQBcN/UdAMzc7UjRdnIpZvO+5keBGhL/vjltnGM1dMWYHa60Y5oh7UIXF+P1RdNW
+n7g80lOyvkSR15/r1rDkqOK8/4oruXU31EcwGhDOC4hU6yMUy4ltV/nBoodHBXNh
+MfKiXeOstH1vdI6G0P6W93Bcww6RyV1KH6sT2dbETCw+iq2VN9CrruGIWzd67UT/
+spe/kYttr3UYVV3O9kqgffVVgVXg/JoRZ3J7Hy2UEXfh9UtWNanDlRuXaZgE9s/d
+CpA30CHpNXvKeyNeW2ktv+2nAbSpvNW+e6MecBCTBIoDSkgU8ShbrzmDKVwNN66Q
+5gn6KxUPBKHEtNzs5DgGM7nq
+-----END CERTIFICATE-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcacert2.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcacert2.pem
new file mode 100644
index 0000000..a8fe6c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcacert2.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDPjCCAiagAwIBAgIUJfHFxtLQBOkjY9ivHx0AIsRDcH0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYxMjI5WhgPMjA2
+NjA0MjAxNjEyNTlaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqj8ANjAGrg5BgUb3owGwUHlMYDxljMdwroA/
+Bv76ESjomj1zCyVtoJxlDZ8m9VcKQldk5ashFNuY+Ms9FrJ1YsePvsfStNe37C26
+2uldDToh5rm7K8uwp/bQiErwM9QZMCVYCPEH8QgETPg9qWnikDFLMqcLBNbIiXVL
+alxEYgA1Qt6+ayMvoS35288hFdZj6a0pCF0+zMHORZxloPhkXWnZLp5lWBiunSJG
+0kVz56TjF+oY0L74iW4y3x2805biisGvFqgpZJW8/hLw/kDthNylNTzEqBktsctQ
+BXpSMcwG3woJ0uZ8cH/HA/m0VDeIA77UisXnlLiQDpdB7U7QPwIDAQABo4GBMH8w
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMLETWAs
+OFNsKJ+uqzChCZvIpxX4MB8GA1UdIwQYMBaAFMLETWAsOFNsKJ+uqzChCZvIpxX4
+MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB
+AQCRlFb6bZDrq3NkoZF9evls7cT41V3XCdykMA4K9YRgDroZ5psanSvYEnSrk9cU
+Y7sVYW7b8qSRWkLZrHCAwc2V0/i5F5j4q9yVnWaTZ+kOVCFYCI8yUS7ixRQdTLNN
+os/r9dcRSzzTEqoQThAzn571yRcbJHzTjda3gCJ5F4utYUBU2F9WK+ukW9nqfepa
+ju5vEEGDuL2+RyApzL0nGzMUkCdBcK82QBksTlElPnbICbJZWUUMTZWPaZ7WGDDa
+Pj+pWMXiDQmzIuzgXUCNtQL6lEv4tQwGYRHjjPmhgJP4sr6Cyrj4G0iljrqM+z/3
+gLyJOlNU8c5x02/C1nFDDa14
+-----END CERTIFICATE-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcakey1.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcakey1.pem
new file mode 100644
index 0000000..05211ba
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcakey1.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdSxz9hfymuJb+c
+N8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP67HDzVZhGBHl
+HTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xEJsHQPYS9ASe2
+eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUbcCcIZyk4QVFZ
+1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SYWrCONRw61A5Z
+wx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABAoIBAG3bCo7ljMQb6tel
+CAUjL5Ilqz5a9ebOsONABRYLOclq4ePbatxawdJF7/sSLwZxKkIJnZtvr2Hkubxg
+eOO8KC0YbVS9u39Rjc2QfobxHfsojpbWSuCJl+pvwinbkiUAUxXR7S/PtCPJKat/
+fGdYCiMQ/tqnynh4vR4+/d5o12c0KuuQ22/MdEf3GOadUamRXS1ET9iJWqla1pJW
+TmzrlkGAEnR5PPO2RMxbnZCYmj3dArxWAnB57W+bWYla0DstkDKtwg2j2ikNZpXB
+nkZJJpxR76IYD1GxfwftqAKxujKcyfqB0dIKCJ0UmfOkauNWjexroNLwaAOC3Nud
+XIxppAECgYEA1wJ9EH6A6CrSjdzUocF9LtQy1LCDHbdiQFHxM5/zZqIxraJZ8Gzh
+Q0d8JeOjwPdG4zL9pHcWS7+x64Wmfn0+Qfh6/47Vy3v90PIL0AeZYshrVZyJ/s6X
+YkgFK80KEuWtacqIZ1K2UJyCw81u/ynIl2doRsIbgkbNeN0opjmqVTMCgYEA3CkW
+2fETWK1LvmgKFjG1TjOotVRIOUfy4iN0kznPm6DK2PgTF5DX5RfktlmA8i8WPmB7
+YFOEdAWHf+RtoM/URa7EAGZncCWe6uggAcWqznTS619BJ63OmncpSWov5Byg90gJ
+48qIMY4wDjE85ypz1bmBc2Iph974dtWeDtB7dsECgYAyKZh4EquMfwEkq9LH8lZ8
+aHF7gbr1YeWAUB3QB49H8KtacTg+iYh8o97pEBUSXh6hvzHB/y6qeYzPAB16AUpX
+Jdu8Z9ylXsY2y2HKJRu6GjxAewcO9bAH8/mQ4INrKT6uIdx1Dq0OXZV8jR9KVLtB
+55RCfeLhIBesDR0Auw9sVQKBgB0xTZhkgP43LF35Ca1btgDClNJGdLUztx8JOIH1
+HnQyY/NVIaL0T8xO2MLdJ131pGts+68QI/YGbaslrOuv4yPCQrcS3RBfzKy1Ttkt
+TrLFhtoy7T7HqyeMOWtEq0kCCs3/PWB5EIoRoomfOcYlOOrUCDg2ge9EP4nyVVz9
+hAGBAoGBAJXw/ufevxpBJJMSyULmVWYr34GwLC1OhSE6AVVt9JkIYnc5L4xBKTHP
+QNKKJLmFmMsEqfxHUNWmpiHkm2E0p37Zehui3kywo+A4ybHPTua70ZWQfZhKxLUr
+PvJa8JmwiCM7kO8zjOv+edY1mMWrbjAZH1YUbfcTHmST7S8vp0F3
+-----END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcakey2.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcakey2.pem
new file mode 100644
index 0000000..c2e3763
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcakey2.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAqj8ANjAGrg5BgUb3owGwUHlMYDxljMdwroA/Bv76ESjomj1z
+CyVtoJxlDZ8m9VcKQldk5ashFNuY+Ms9FrJ1YsePvsfStNe37C262uldDToh5rm7
+K8uwp/bQiErwM9QZMCVYCPEH8QgETPg9qWnikDFLMqcLBNbIiXVLalxEYgA1Qt6+
+ayMvoS35288hFdZj6a0pCF0+zMHORZxloPhkXWnZLp5lWBiunSJG0kVz56TjF+oY
+0L74iW4y3x2805biisGvFqgpZJW8/hLw/kDthNylNTzEqBktsctQBXpSMcwG3woJ
+0uZ8cH/HA/m0VDeIA77UisXnlLiQDpdB7U7QPwIDAQABAoIBADivQ2XHdeHsUzk1
+JOz8efVBfgGo+nL2UPl5MAMnUKH4CgKZJT3311mb2TXA4RrdQUg3ixvBcAFe4L8u
+BIgTIWyjX6Q5KloWXWHhFA8hll76FSGag8ygRJCYaHSI5xOKslxKgtZvUqKZdb0f
+BoDrBYnXL9+MqOmSjjDegh7G2+n49n774Z2VVR47TZTBB5LCWDWj4AtEcalgwlvw
+d5yL/GU/RfCkXCjCeie1pInp3eCMUI9jlvbe/vyaoFq2RiaJw1LSlJLXZBMYzaij
+XkgMtRsr5bf0Tg2z3SPiaa9QZogfVLqHWAt6RHZf9Keidtiho+Ad6/dzJu+jKDys
+Z6cthOECgYEAxMUCIYKO74BtPRN2r7KxbSjHzFsasxbfwkSg4Qefd4UoZJX2ShlL
+cClnef3WdkKxtShJhqEPaKTYTrfgM+iz/a9+3lAFnS4EZawSf3YgXXslVTory0Da
+yPQZKxX6XsupaLl4s13ehw/D0qfdxWVYaiFad3ePEE4ytmSkMMHLHo8CgYEA3X4a
+jMWVbVv1W1lj+LFcg7AhU7lHgla+p7NI4gHw9V783noafnW7/8pNF80kshYo4u0g
+aJRwaU/Inr5uw14eAyEjB4X7N8AE5wGmcxxS2uluGG6r3oyQSJBqktGnLwyTfcfC
+XrfsGJza2BRGF4Mn8SFb7WtCl3f1qu0hTF+mC1ECgYB4oA1eXZsiV6if+H6Z1wHN
+2WIidPc5MpyZi1jUmse3jXnlr8j8Q+VrLPayYlpGxTwLwlbQoYvAqs2v9CkNqWot
+6pfr0UKfyMYJTiNI4DGXHRcV2ENgprF436tOLnr+AfwopwrHapQwWAnD6gSaLja1
+WR0Mf87EQCv2hFvjR+otIQKBgQCLyvJQ1MeZzQdPT1zkcnSUfM6b+/1hCwSr7WDb
+nCQLiZcJh4E/PWmZaII9unEloQzPJKBmwQEtxng1kLVxwu4oRXrJXcuPhTbS4dy/
+HCpDFj8xVnBNNuQ9mEBbR80/ya0xHqnThDuT0TPiWvFeF55W9xoA/8h4tvKrnZx9
+ioTO8QKBgCMqRa5pHb+vCniTWUTz9JZRnRsdq7fRSsJHngMe5gOR4HylyAmmqKrd
+kEXfkdu9TH2jxSWcZbHUPVwKfOUqQUZMz0pml0DIs1kedUDFanTZ8Rgg5SGUHBW0
+5bNCq64tKMmw6GiicaAGqd04OPo85WD9h8mPhM1Jdv/UmTV+HFAr
+-----END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testissuedcert4.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testissuedcert4.pem
new file mode 100644
index 0000000..5bffd67
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testissuedcert4.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIUBLqh6ctGWVDUxFhxJX7m6S/bnrcwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwOTI2WhgPMjA2
+NjA0MjAxNTA5NTZaMBsxGTAXBgNVBAMTEGNlcnQubXl2YXVsdC5jb20wggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDY3gPB29kkdbu0mPO6J0efagQhSiXB
+9OyDuLf5sMk6CVDWVWal5hISkyBmw/lXgF7qC2XFKivpJOrcGQd5Ep9otBqyJLzI
+b0IWdXuPIrVnXDwcdWr86ybX2iC42zKWfbXgjzGijeAVpl0UJLKBj+fk5q6NvkRL
+5FUL6TRV7Krn9mrmnrV9J5IqV15pTd9W2aVJ6IqWvIPCACtZKulqWn4707uy2X2W
+1Stq/5qnp1pDshiGk1VPyxCwQ6yw3iEcgecbYo3vQfhWcv7Q8LpSIM9ZYpXu6OmF
++czqRZS9gERl+wipmmrN1MdYVrTuQem21C/PNZ4jo4XUk1SFx6JrcA+lAgMBAAGj
+gfUwgfIwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSe
+Cl9WV3BjGCwmS/KrDSLRjfwyqjAfBgNVHSMEGDAWgBTrirz4lBtOoh5EVwDnjIlH
+QXxxcjA7BggrBgEFBQcBAQQvMC0wKwYIKwYBBQUHMAKGH2h0dHA6Ly8xMjcuMC4w
+LjE6ODIwMC92MS9wa2kvY2EwIQYDVR0RBBowGIIQY2VydC5teXZhdWx0LmNvbYcE
+fwAAATAxBgNVHR8EKjAoMCagJKAihiBodHRwOi8vMTI3LjAuMC4xOjgyMDAvdjEv
+cGtpL2NybDANBgkqhkiG9w0BAQsFAAOCAQEAWGholPN8buDYwKbUiDavbzjsxUIX
+lU4MxEqOHw7CD3qIYIauPboLvB9EldBQwhgOOy607Yvdg3rtyYwyBFwPhHo/hK3Z
+6mn4hc6TF2V+AUdHBvGzp2dbYLeo8noVoWbQ/lBulggwlIHNNF6+a3kALqsqk1Ch
+f/hzsjFnDhAlNcYFgG8TgfE2lE/FckvejPqBffo7Q3I+wVAw0buqiz5QL81NOT+D
+Y2S9LLKLRaCsWo9wRU1Az4Rhd7vK5SEMh16jJ82GyEODWPvuxOTI1MnzfnbWyLYe
+TTp6YBjGMVf1I6NEcWNur7U17uIOiQjMZ9krNvoMJ1A/cxCoZ98QHgcIPg==
+-----END CERTIFICATE-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testissuedkey4.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testissuedkey4.pem
new file mode 100644
index 0000000..58e7f8d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testissuedkey4.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA2N4DwdvZJHW7tJjzuidHn2oEIUolwfTsg7i3+bDJOglQ1lVm
+peYSEpMgZsP5V4Be6gtlxSor6STq3BkHeRKfaLQasiS8yG9CFnV7jyK1Z1w8HHVq
+/Osm19oguNsyln214I8xoo3gFaZdFCSygY/n5Oaujb5ES+RVC+k0Veyq5/Zq5p61
+fSeSKldeaU3fVtmlSeiKlryDwgArWSrpalp+O9O7stl9ltUrav+ap6daQ7IYhpNV
+T8sQsEOssN4hHIHnG2KN70H4VnL+0PC6UiDPWWKV7ujphfnM6kWUvYBEZfsIqZpq
+zdTHWFa07kHpttQvzzWeI6OF1JNUhceia3APpQIDAQABAoIBAQCH3vEzr+3nreug
+RoPNCXcSJXXY9X+aeT0FeeGqClzIg7Wl03OwVOjVwl/2gqnhbIgK0oE8eiNwurR6
+mSPZcxV0oAJpwiKU4T/imlCDaReGXn86xUX2l82KRxthNdQH/VLKEmzij0jpx4Vh
+bWx5SBPdkbmjDKX1dmTiRYWIn/KjyNPvNvmtwdi8Qluhf4eJcNEUr2BtblnGOmfL
+FdSu+brPJozpoQ1QdDnbAQRgqnh7Shl0tT85whQi0uquqIj1gEOGVjmBvDDnL3GV
+WOENTKqsmIIoEzdZrql1pfmYTk7WNaD92bfpN128j8BF7RmAV4/DphH0pvK05y9m
+tmRhyHGxAoGBAOV2BBocsm6xup575VqmFN+EnIOiTn+haOvfdnVsyQHnth63fOQx
+PNtMpTPR1OMKGpJ13e2bV0IgcYRsRkScVkUtoa/17VIgqZXffnJJ0A/HT67uKBq3
+8o7RrtyK5N20otw0lZHyqOPhyCdpSsurDhNON1kPVJVYY4N1RiIxfut/AoGBAPHz
+HfsJ5ZkyELE9N/r4fce04lprxWH+mQGK0/PfjS9caXPhj/r5ZkVMvzWesF3mmnY8
+goE5S35TuTvV1+6rKGizwlCFAQlyXJiFpOryNWpLwCmDDSzLcm+sToAlML3tMgWU
+jM3dWHx3C93c3ft4rSWJaUYI9JbHsMzDW6Yh+GbbAoGBANIbKwxh5Hx5XwEJP2yu
+kIROYCYkMy6otHLujgBdmPyWl+suZjxoXWoMl2SIqR8vPD+Jj6mmyNJy9J6lqf3f
+DRuQ+fEuBZ1i7QWfvJ+XuN0JyovJ5Iz6jC58D1pAD+p2IX3y5FXcVQs8zVJRFjzB
+p0TEJOf2oqORaKWRd6ONoMKvAoGALKu6aVMWdQZtVov6/fdLIcgf0pn7Q3CCR2qe
+X3Ry2L+zKJYIw0mwvDLDSt8VqQCenB3n6nvtmFFU7ds5lvM67rnhsoQcAOaAehiS
+rl4xxoJd5Ewx7odRhZTGmZpEOYzFo4odxRSM9c30/u18fqV1Mm0AZtHYds4/sk6P
+aUj0V+kCgYBMpGrJk8RSez5g0XZ35HfpI4ENoWbiwB59FIpWsLl2LADEh29eC455
+t9Muq7MprBVBHQo11TMLLFxDIjkuMho/gcKgpYXCt0LfiNm8EZehvLJUXH+3WqUx
+we6ywrbFCs6LaxaOCtTiLsN+GbZCatITL0UJaeBmTAbiw0KQjUuZPQ==
+-----END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go
new file mode 100644
index 0000000..0dbe893
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go
@@ -0,0 +1,91 @@
+package github
+
+import (
+ "github.com/google/go-github/github"
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "golang.org/x/oauth2"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+func Backend() *backend {
+ var b backend
+ b.TeamMap = &framework.PolicyMap{
+ PathMap: framework.PathMap{
+ Name: "teams",
+ },
+ DefaultKey: "default",
+ }
+
+ b.UserMap = &framework.PolicyMap{
+ PathMap: framework.PathMap{
+ Name: "users",
+ },
+ DefaultKey: "default",
+ }
+
+ allPaths := append(b.TeamMap.Paths(), b.UserMap.Paths()...)
+
+ b.Backend = &framework.Backend{
+ Help: backendHelp,
+
+ PathsSpecial: &logical.Paths{
+ Unauthenticated: []string{
+ "login",
+ },
+ },
+
+ Paths: append([]*framework.Path{
+ pathConfig(&b),
+ pathLogin(&b),
+ }, allPaths...),
+
+ AuthRenew: b.pathLoginRenew,
+ }
+
+ return &b
+}
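+
+// The PolicyMaps above generate the "map/teams/<team>" and "map/users/<user>"
+// endpoints used to associate GitHub teams and users with Vault policies,
+// e.g. (hypothetical mount point "github"):
+//
+// vault write auth/github/map/teams/owners value=admin-policy
+// vault write auth/github/map/users/octocat value=user-policy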
+
+type backend struct {
+ *framework.Backend
+
+ TeamMap *framework.PolicyMap
+
+ UserMap *framework.PolicyMap
+}
+
+// Client returns the GitHub client to communicate to GitHub via the
+// configured settings.
+func (b *backend) Client(token string) (*github.Client, error) {
+ tc := cleanhttp.DefaultClient()
+ if token != "" {
+ tc = oauth2.NewClient(oauth2.NoContext, &tokenSource{Value: token})
+ }
+
+ return github.NewClient(tc), nil
+}
+
+// tokenSource is an oauth2.TokenSource implementation.
+type tokenSource struct {
+ Value string
+}
+
+func (t *tokenSource) Token() (*oauth2.Token, error) {
+ return &oauth2.Token{AccessToken: t.Value}, nil
+}
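+
+// A minimal sketch of how Client and tokenSource fit together (the token
+// below is a placeholder, not a real credential):
+//
+// client, _ := b.Client("<personal-access-token>")
+// user, _, err := client.Users.Get(context.Background(), "") // authenticated user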
+
+const backendHelp = `
+The GitHub credential provider allows authentication via GitHub.
+
+Users provide a personal access token to log in, and the credential
+provider verifies they're part of the correct organization and then
+maps the user to a set of Vault policies according to the teams they're
+part of.
+
+After enabling the credential provider, use the "config" route to
+configure it.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go
new file mode 100644
index 0000000..037c2ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go
@@ -0,0 +1,202 @@
+package github
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+)
+
+func TestBackend_Config(t *testing.T) {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 2
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ login_data := map[string]interface{}{
+ // GITHUB_TOKEN must be set to a valid personal access token for the test to work.
+ "token": os.Getenv("GITHUB_TOKEN"),
+ }
+ config_data1 := map[string]interface{}{
+ "organization": os.Getenv("GITHUB_ORG"),
+ "ttl": "",
+ "max_ttl": "",
+ }
+ expectedTTL1, _ := time.ParseDuration("24h0m0s")
+ config_data2 := map[string]interface{}{
+ "organization": os.Getenv("GITHUB_ORG"),
+ "ttl": "1h",
+ "max_ttl": "2h",
+ }
+ expectedTTL2, _ := time.ParseDuration("1h0m0s")
+ config_data3 := map[string]interface{}{
+ "organization": os.Getenv("GITHUB_ORG"),
+ "ttl": "50h",
+ "max_ttl": "50h",
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ PreCheck: func() { testAccPreCheck(t) },
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testConfigWrite(t, config_data1),
+ testLoginWrite(t, login_data, expectedTTL1.Nanoseconds(), false),
+ testConfigWrite(t, config_data2),
+ testLoginWrite(t, login_data, expectedTTL2.Nanoseconds(), false),
+ testConfigWrite(t, config_data3),
+ testLoginWrite(t, login_data, 0, true),
+ },
+ })
+}
+
+func testLoginWrite(t *testing.T, d map[string]interface{}, expectedTTL int64, expectFail bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ ErrorOk: true,
+ Data: d,
+ Check: func(resp *logical.Response) error {
+ if resp.IsError() {
+ if expectFail {
+ return nil
+ }
+ return fmt.Errorf("unexpected error response: %v", resp.Error())
+ }
+ actualTTL := resp.Auth.LeaseOptions.TTL.Nanoseconds()
+ if actualTTL != expectedTTL {
+ return fmt.Errorf("TTL mismatch. Expected: %d Actual: %d", expectedTTL, actualTTL)
+ }
+ return nil
+ },
+ }
+}
+
+func testConfigWrite(t *testing.T, d map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config",
+ Data: d,
+ }
+}
+
+func TestBackend_basic(t *testing.T) {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 32
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ PreCheck: func() { testAccPreCheck(t) },
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, false),
+ testAccMap(t, "default", "fakepol"),
+ testAccMap(t, "oWnErs", "fakepol"),
+ testAccLogin(t, []string{"default", "fakepol"}),
+ testAccStepConfig(t, true),
+ testAccMap(t, "default", "fakepol"),
+ testAccMap(t, "oWnErs", "fakepol"),
+ testAccLogin(t, []string{"default", "fakepol"}),
+ testAccStepConfigWithBaseURL(t),
+ testAccMap(t, "default", "fakepol"),
+ testAccMap(t, "oWnErs", "fakepol"),
+ testAccLogin(t, []string{"default", "fakepol"}),
+ testAccMap(t, "default", "fakepol"),
+ testAccStepConfig(t, true),
+ mapUserToPolicy(t, os.Getenv("GITHUB_USER"), "userpolicy"),
+ testAccLogin(t, []string{"default", "fakepol", "userpolicy"}),
+ },
+ })
+}
+
+func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("GITHUB_TOKEN"); v == "" {
+ t.Fatal("GITHUB_TOKEN must be set for acceptance tests")
+ }
+
+ if v := os.Getenv("GITHUB_ORG"); v == "" {
+ t.Fatal("GITHUB_ORG must be set for acceptance tests")
+ }
+
+ if v := os.Getenv("GITHUB_BASEURL"); v == "" {
+ t.Fatal("GITHUB_BASEURL must be set for acceptance tests (use 'https://api.github.com' if you don't know what you're doing)")
+ }
+}
+
+func testAccStepConfig(t *testing.T, upper bool) logicaltest.TestStep {
+ ts := logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config",
+ Data: map[string]interface{}{
+ "organization": os.Getenv("GITHUB_ORG"),
+ },
+ }
+ if upper {
+ ts.Data["organization"] = strings.ToUpper(os.Getenv("GITHUB_ORG"))
+ }
+ return ts
+}
+
+func testAccStepConfigWithBaseURL(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config",
+ Data: map[string]interface{}{
+ "organization": os.Getenv("GITHUB_ORG"),
+ "base_url": os.Getenv("GITHUB_BASEURL"),
+ },
+ }
+}
+
+func testAccMap(t *testing.T, k string, v string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "map/teams/" + k,
+ Data: map[string]interface{}{
+ "value": v,
+ },
+ }
+}
+
+func mapUserToPolicy(t *testing.T, k string, v string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "map/users/" + k,
+ Data: map[string]interface{}{
+ "value": v,
+ },
+ }
+}
+
+func testAccLogin(t *testing.T, policies []string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Data: map[string]interface{}{
+ "token": os.Getenv("GITHUB_TOKEN"),
+ },
+ Unauthenticated: true,
+
+ Check: logicaltest.TestCheckAuth(policies),
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go
new file mode 100644
index 0000000..dda1dac
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go
@@ -0,0 +1,58 @@
+package github
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+)
+
+type CLIHandler struct{}
+
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+ mount, ok := m["mount"]
+ if !ok {
+ mount = "github"
+ }
+
+ token, ok := m["token"]
+ if !ok {
+ if token = os.Getenv("VAULT_AUTH_GITHUB_TOKEN"); token == "" {
+ return "", fmt.Errorf("GitHub token should be provided either as 'value' for 'token' key,\nor via an env var VAULT_AUTH_GITHUB_TOKEN")
+ }
+ }
+
+ path := fmt.Sprintf("auth/%s/login", mount)
+ secret, err := c.Logical().Write(path, map[string]interface{}{
+ "token": token,
+ })
+ if err != nil {
+ return "", err
+ }
+ if secret == nil {
+ return "", fmt.Errorf("empty response from credential provider")
+ }
+
+ return secret.Auth.ClientToken, nil
+}
+
+func (h *CLIHandler) Help() string {
+ help := `
+The GitHub credential provider allows you to authenticate with GitHub.
+To use it, specify the "token" parameter. The value should be a personal access
+token for your GitHub account. You can generate a personal access token on your
+account settings page on GitHub.
+
+ Example: vault auth -method=github token=
+
+Key/Value Pairs:
+
+ mount=github The mountpoint for the GitHub credential provider.
+ Defaults to "github"
+
+ token= The GitHub personal access token for authentication.
+ `
+
+ return strings.TrimSpace(help)
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go
new file mode 100644
index 0000000..9db2e64
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go
@@ -0,0 +1,138 @@
+package github
+
+import (
+ "fmt"
+ "net/url"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/fatih/structs"
+)
+
+func pathConfig(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config",
+ Fields: map[string]*framework.FieldSchema{
+ "organization": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The organization users must be part of",
+ },
+
+ "base_url": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The API endpoint to use. Useful if you
+are running GitHub Enterprise or an
+API-compatible authentication server.`,
+ },
+ "ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Duration after which authentication will be expired`,
+ },
+ "max_ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Maximum duration after which authentication will be expired`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathConfigWrite,
+ logical.ReadOperation: b.pathConfigRead,
+ },
+ }
+}
+
+func (b *backend) pathConfigWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ organization := data.Get("organization").(string)
+ baseURL := data.Get("base_url").(string)
+ if len(baseURL) != 0 {
+ _, err := url.Parse(baseURL)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Error parsing given base_url: %s", err)), nil
+ }
+ }
+
+ var ttl time.Duration
+ var err error
+ ttlRaw, ok := data.GetOk("ttl")
+ if !ok || len(ttlRaw.(string)) == 0 {
+ ttl = 0
+ } else {
+ ttl, err = time.ParseDuration(ttlRaw.(string))
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Invalid 'ttl':%s", err)), nil
+ }
+ }
+
+ var maxTTL time.Duration
+ maxTTLRaw, ok := data.GetOk("max_ttl")
+ if !ok || len(maxTTLRaw.(string)) == 0 {
+ maxTTL = 0
+ } else {
+ maxTTL, err = time.ParseDuration(maxTTLRaw.(string))
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Invalid 'max_ttl':%s", err)), nil
+ }
+ }
+
+ entry, err := logical.StorageEntryJSON("config", config{
+ Organization: organization,
+ BaseURL: baseURL,
+ TTL: ttl,
+ MaxTTL: maxTTL,
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathConfigRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ config, err := b.Config(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ if config == nil {
+ return nil, fmt.Errorf("configuration object not found")
+ }
+
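+ // Report the TTLs in seconds: dividing a time.Duration by time.Second keeps only whole seconds.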
+ config.TTL /= time.Second
+ config.MaxTTL /= time.Second
+
+ resp := &logical.Response{
+ Data: structs.New(config).Map(),
+ }
+ return resp, nil
+}
+
+// Config returns the configuration for this backend.
+func (b *backend) Config(s logical.Storage) (*config, error) {
+ entry, err := s.Get("config")
+ if err != nil {
+ return nil, err
+ }
+
+ var result config
+ if entry != nil {
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, fmt.Errorf("error reading configuration: %s", err)
+ }
+ }
+
+ return &result, nil
+}
+
+type config struct {
+ Organization string `json:"organization" structs:"organization" mapstructure:"organization"`
+ BaseURL string `json:"base_url" structs:"base_url" mapstructure:"base_url"`
+ TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
+ MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
+}
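+
+// A hypothetical configuration write against the "config" path above
+// (mount point "github" assumed):
+//
+// vault write auth/github/config organization=my-org ttl=1h max_ttl=2h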
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/path_login.go
new file mode 100644
index 0000000..0e212ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/path_login.go
@@ -0,0 +1,221 @@
+package github
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/google/go-github/github"
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathLogin(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "login",
+ Fields: map[string]*framework.FieldSchema{
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "GitHub personal API token",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathLogin,
+ },
+ }
+}
+
+func (b *backend) pathLogin(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+
+ token := data.Get("token").(string)
+
+ var verifyResp *verifyCredentialsResp
+ if verifyResponse, resp, err := b.verifyCredentials(req, token); err != nil {
+ return nil, err
+ } else if resp != nil {
+ return resp, nil
+ } else {
+ verifyResp = verifyResponse
+ }
+
+ config, err := b.Config(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ ttl, _, err := b.SanitizeTTLStr(config.TTL.String(), config.MaxTTL.String())
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("error sanitizing TTLs: %s", err)), nil
+ }
+
+ return &logical.Response{
+ Auth: &logical.Auth{
+ InternalData: map[string]interface{}{
+ "token": token,
+ },
+ Policies: verifyResp.Policies,
+ Metadata: map[string]string{
+ "username": *verifyResp.User.Login,
+ "org": *verifyResp.Org.Login,
+ },
+ DisplayName: *verifyResp.User.Login,
+ LeaseOptions: logical.LeaseOptions{
+ TTL: ttl,
+ Renewable: true,
+ },
+ },
+ }, nil
+}
+
+func (b *backend) pathLoginRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ if req.Auth == nil {
+ return nil, fmt.Errorf("request auth was nil")
+ }
+
+ tokenRaw, ok := req.Auth.InternalData["token"]
+ if !ok {
+ return nil, fmt.Errorf("token created in previous version of Vault cannot be validated properly at renewal time")
+ }
+ token := tokenRaw.(string)
+
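+ // Re-verify the token against GitHub so a revoked token or changed org/team membership stops renewal.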
+ var verifyResp *verifyCredentialsResp
+ if verifyResponse, resp, err := b.verifyCredentials(req, token); err != nil {
+ return nil, err
+ } else if resp != nil {
+ return resp, nil
+ } else {
+ verifyResp = verifyResponse
+ }
+ if !policyutil.EquivalentPolicies(verifyResp.Policies, req.Auth.Policies) {
+ return nil, fmt.Errorf("policies do not match")
+ }
+
+ config, err := b.Config(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ return framework.LeaseExtend(config.TTL, config.MaxTTL, b.System())(req, d)
+}
+
+func (b *backend) verifyCredentials(req *logical.Request, token string) (*verifyCredentialsResp, *logical.Response, error) {
+ config, err := b.Config(req.Storage)
+ if err != nil {
+ return nil, nil, err
+ }
+ if config.Organization == "" {
+ return nil, logical.ErrorResponse(
+ "configure the github credential backend first"), nil
+ }
+
+ client, err := b.Client(token)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if config.BaseURL != "" {
+ parsedURL, err := url.Parse(config.BaseURL)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Successfully parsed base_url when set but failing to parse now: %s", err)
+ }
+ client.BaseURL = parsedURL
+ }
+
+ // Get the user
+ user, _, err := client.Users.Get(context.Background(), "")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Verify that the user is part of the organization
+ var org *github.Organization
+
+ orgOpt := &github.ListOptions{
+ PerPage: 100,
+ }
+
+ var allOrgs []*github.Organization
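+ // Page through all org memberships; the API returns at most 100 per page and resp.NextPage is 0 on the last page.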
+ for {
+ orgs, resp, err := client.Organizations.List(context.Background(), "", orgOpt)
+ if err != nil {
+ return nil, nil, err
+ }
+ allOrgs = append(allOrgs, orgs...)
+ if resp.NextPage == 0 {
+ break
+ }
+ orgOpt.Page = resp.NextPage
+ }
+
+ for _, o := range allOrgs {
+ if strings.EqualFold(*o.Login, config.Organization) {
+ org = o
+ break
+ }
+ }
+ if org == nil {
+ return nil, logical.ErrorResponse("user is not part of required org"), nil
+ }
+
+ // Get the teams that this user is part of to determine the policies
+ var teamNames []string
+
+ teamOpt := &github.ListOptions{
+ PerPage: 100,
+ }
+
+ var allTeams []*github.Team
+ for {
+ teams, resp, err := client.Organizations.ListUserTeams(context.Background(), teamOpt)
+ if err != nil {
+ return nil, nil, err
+ }
+ allTeams = append(allTeams, teams...)
+ if resp.NextPage == 0 {
+ break
+ }
+ teamOpt.Page = resp.NextPage
+ }
+
+ for _, t := range allTeams {
+ // We only care about teams that are part of the organization we use
+ if *t.Organization.ID != *org.ID {
+ continue
+ }
+
+ // Append the names so we can get the policies
+ teamNames = append(teamNames, *t.Name)
+ if *t.Name != *t.Slug {
+ teamNames = append(teamNames, *t.Slug)
+ }
+ }
+
+ groupPoliciesList, err := b.TeamMap.Policies(req.Storage, teamNames...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ userPoliciesList, err := b.UserMap.Policies(req.Storage, *user.Login)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return &verifyCredentialsResp{
+ User: user,
+ Org: org,
+ Policies: append(groupPoliciesList, userPoliciesList...),
+ }, nil, nil
+}
+
+type verifyCredentialsResp struct {
+ User *github.User
+ Org *github.Organization
+ Policies []string
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go
new file mode 100644
index 0000000..d165626
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go
@@ -0,0 +1,404 @@
+package ldap
+
+import (
+ "bytes"
+ "fmt"
+ "text/template"
+
+ "github.com/go-ldap/ldap"
+ "github.com/hashicorp/vault/helper/mfa"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: backendHelp,
+
+ PathsSpecial: &logical.Paths{
+ Root: mfa.MFARootPaths(),
+
+ Unauthenticated: []string{
+ "login/*",
+ },
+ },
+
+ Paths: append([]*framework.Path{
+ pathConfig(&b),
+ pathGroups(&b),
+ pathGroupsList(&b),
+ pathUsers(&b),
+ pathUsersList(&b),
+ },
+ mfa.MFAPaths(b.Backend, pathLogin(&b))...,
+ ),
+
+ AuthRenew: b.pathLoginRenew,
+ }
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+}
+
+func EscapeLDAPValue(input string) string {
+ // RFC4514 forbids un-escaped:
+ // - leading space or hash
+ // - trailing space
+ // - special characters '"', '+', ',', ';', '<', '>', '\\'
+ // - null
+ for i := 0; i < len(input); i++ {
+ escaped := false
+ if input[i] == '\\' {
+ i++
+ escaped = true
+ }
+ switch input[i] {
+ case '"', '+', ',', ';', '<', '>', '\\':
+ if !escaped {
+ input = input[0:i] + "\\" + input[i:]
+ i++
+ }
+ continue
+ }
+ if escaped {
+ input = input[0:i] + "\\" + input[i:]
+ i++
+ }
+ }
+ if input[0] == ' ' || input[0] == '#' {
+ input = "\\" + input
+ }
+ if input[len(input)-1] == ' ' {
+ input = input[0:len(input)-1] + "\\ "
+ }
+ return input
+}
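+
+// For example, EscapeLDAPValue(`Doe, John`) yields `Doe\, John`; a leading
+// space or '#' is likewise escaped.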
+
+func (b *backend) Login(req *logical.Request, username string, password string) ([]string, *logical.Response, error) {
+
+ cfg, err := b.Config(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ if cfg == nil {
+ return nil, logical.ErrorResponse("ldap backend not configured"), nil
+ }
+
+ c, err := cfg.DialLDAP()
+ if err != nil {
+ return nil, logical.ErrorResponse(err.Error()), nil
+ }
+ if c == nil {
+ return nil, logical.ErrorResponse("invalid connection returned from LDAP dial"), nil
+ }
+
+ // Clean connection
+ defer c.Close()
+
+ userBindDN, err := b.getUserBindDN(cfg, c, username)
+ if err != nil {
+ return nil, logical.ErrorResponse(err.Error()), nil
+ }
+
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: User BindDN fetched", "username", username, "binddn", userBindDN)
+ }
+
+ if cfg.DenyNullBind && len(password) == 0 {
+ return nil, logical.ErrorResponse("password cannot be of zero length when passwordless binds are being denied"), nil
+ }
+
+ // Try to bind as the login user. This is where the actual authentication takes place.
+ if err = c.Bind(userBindDN, password); err != nil {
+ return nil, logical.ErrorResponse(fmt.Sprintf("LDAP bind failed: %v", err)), nil
+ }
+
+ // We re-bind to the BindDN if it's defined because we assume
+ // the BindDN should be the one to search, not the user logging in.
+ if cfg.BindDN != "" && cfg.BindPassword != "" {
+ if err := c.Bind(cfg.BindDN, cfg.BindPassword); err != nil {
+ return nil, logical.ErrorResponse(fmt.Sprintf("Encountered an error while attempting to re-bind with the BindDN User: %s", err.Error())), nil
+ }
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: Re-Bound to original BindDN")
+ }
+ }
+
+ userDN, err := b.getUserDN(cfg, c, userBindDN)
+ if err != nil {
+ return nil, logical.ErrorResponse(err.Error()), nil
+ }
+
+ ldapGroups, err := b.getLdapGroups(cfg, c, userDN, username)
+ if err != nil {
+ return nil, logical.ErrorResponse(err.Error()), nil
+ }
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: Groups fetched from server", "num_server_groups", len(ldapGroups), "server_groups", ldapGroups)
+ }
+
+ ldapResponse := &logical.Response{
+ Data: map[string]interface{}{},
+ }
+ if len(ldapGroups) == 0 {
+ errString := fmt.Sprintf(
+ "no LDAP groups found in groupDN '%s'; only policies from locally-defined groups available",
+ cfg.GroupDN)
+ ldapResponse.AddWarning(errString)
+ }
+
+ var allGroups []string
+ // Import the custom added groups from ldap backend
+ user, err := b.User(req.Storage, username)
+ if err == nil && user != nil && user.Groups != nil {
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups)
+ }
+ allGroups = append(allGroups, user.Groups...)
+ }
+ // Merge local and LDAP groups
+ allGroups = append(allGroups, ldapGroups...)
+
+ // Retrieve policies
+ var policies []string
+ for _, groupName := range allGroups {
+ group, err := b.Group(req.Storage, groupName)
+ if err == nil && group != nil {
+ policies = append(policies, group.Policies...)
+ }
+ }
+ if user != nil && user.Policies != nil {
+ policies = append(policies, user.Policies...)
+ }
+ // Policies from each group may overlap
+ policies = strutil.RemoveDuplicates(policies, true)
+
+ if len(policies) == 0 {
+ errStr := "user is not a member of any authorized group"
+ if len(ldapResponse.Warnings()) > 0 {
+ errStr = fmt.Sprintf("%s; additionally, %s", errStr, ldapResponse.Warnings()[0])
+ }
+
+ ldapResponse.Data["error"] = errStr
+ return nil, ldapResponse, nil
+ }
+
+ return policies, ldapResponse, nil
+}
+
+/*
+ * Parses a distinguished name and returns the CN portion.
+ * Given a non-conforming string (such as an already-extracted CN),
+ * it will be returned as-is.
+ */
+func (b *backend) getCN(dn string) string {
+ parsedDN, err := ldap.ParseDN(dn)
+ if err != nil || len(parsedDN.RDNs) == 0 {
+ // It was already a CN, return as-is
+ return dn
+ }
+
+ for _, rdn := range parsedDN.RDNs {
+ for _, rdnAttr := range rdn.Attributes {
+ if rdnAttr.Type == "CN" {
+ return rdnAttr.Value
+ }
+ }
+ }
+
+ // Default, return self
+ return dn
+}
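+
+// For example, getCN("CN=Scientists,DC=example,DC=com") returns
+// "Scientists", while a bare value such as "Scientists" is returned unchanged.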
+
+/*
+ * Discover and return the bind string for the user attempting to authenticate.
+ * This is handled in one of several ways:
+ *
+ * 1. If DiscoverDN is set, the user object will be searched for using userdn (base search path)
+ * and userattr (the attribute that maps to the provided username).
+ * The bind will either be anonymous or use binddn and bindpassword if they were provided.
+ * 2. If upndomain is set, the user dn is constructed as 'username@upndomain'. See https://msdn.microsoft.com/en-us/library/cc223499.aspx
+ *
+ */
+func (b *backend) getUserBindDN(cfg *ConfigEntry, c *ldap.Conn, username string) (string, error) {
+ bindDN := ""
+ if cfg.DiscoverDN || (cfg.BindDN != "" && cfg.BindPassword != "") {
+ if err := c.Bind(cfg.BindDN, cfg.BindPassword); err != nil {
+ return bindDN, fmt.Errorf("LDAP bind (service) failed: %v", err)
+ }
+
+ filter := fmt.Sprintf("(%s=%s)", cfg.UserAttr, ldap.EscapeFilter(username))
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: Discovering user", "userdn", cfg.UserDN, "filter", filter)
+ }
+ result, err := c.Search(&ldap.SearchRequest{
+ BaseDN: cfg.UserDN,
+ Scope: 2, // subtree
+ Filter: filter,
+ })
+ if err != nil {
+ return bindDN, fmt.Errorf("LDAP search for binddn failed: %v", err)
+ }
+ if len(result.Entries) != 1 {
+ return bindDN, fmt.Errorf("LDAP search for binddn 0 or not unique")
+ }
+ bindDN = result.Entries[0].DN
+ } else {
+ if cfg.UPNDomain != "" {
+ bindDN = fmt.Sprintf("%s@%s", EscapeLDAPValue(username), cfg.UPNDomain)
+ } else {
+ bindDN = fmt.Sprintf("%s=%s,%s", cfg.UserAttr, EscapeLDAPValue(username), cfg.UserDN)
+ }
+ }
+
+ return bindDN, nil
+}
+
+/*
+ * Returns the DN of the object representing the authenticated user.
+ */
+func (b *backend) getUserDN(cfg *ConfigEntry, c *ldap.Conn, bindDN string) (string, error) {
+ userDN := ""
+ if cfg.UPNDomain != "" {
+ // Find the distinguished name for the user if userPrincipalName used for login
+ filter := fmt.Sprintf("(userPrincipalName=%s)", ldap.EscapeFilter(bindDN))
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: Searching UPN", "userdn", cfg.UserDN, "filter", filter)
+ }
+ result, err := c.Search(&ldap.SearchRequest{
+ BaseDN: cfg.UserDN,
+ Scope: 2, // subtree
+ Filter: filter,
+ })
+ if err != nil {
+ return userDN, fmt.Errorf("LDAP search failed for detecting user: %v", err)
+ }
+ for _, e := range result.Entries {
+ userDN = e.DN
+ }
+ } else {
+ userDN = bindDN
+ }
+
+ return userDN, nil
+}
+
+/*
+ * getLdapGroups queries LDAP and returns a slice describing the set of groups the authenticated user is a member of.
+ *
+ * The search query is constructed according to cfg.GroupFilter, and run in context of cfg.GroupDN.
+ * Groups will be resolved from the query results by following the attribute defined in cfg.GroupAttr.
+ *
+ * cfg.GroupFilter is a go template and is compiled with the following context: [UserDN, Username]
+ * UserDN - The DN of the authenticated user
+ * Username - The Username of the authenticated user
+ *
+ * Example:
+ * cfg.GroupFilter = "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))"
+ * cfg.GroupDN = "OU=Groups,DC=myorg,DC=com"
+ * cfg.GroupAttr = "cn"
+ *
+ * NOTE - If cfg.GroupFilter is empty, no query is performed and an empty result slice is returned.
+ *
+ */
+func (b *backend) getLdapGroups(cfg *ConfigEntry, c *ldap.Conn, userDN string, username string) ([]string, error) {
+ // retrieve the groups in a string/bool map as a structure to avoid duplicates inside
+ ldapMap := make(map[string]bool)
+
+ if cfg.GroupFilter == "" {
+ b.Logger().Warn("auth/ldap: GroupFilter is empty, will not query server")
+ return make([]string, 0), nil
+ }
+
+ if cfg.GroupDN == "" {
+ b.Logger().Warn("auth/ldap: GroupDN is empty, will not query server")
+ return make([]string, 0), nil
+ }
+
+ // If groupfilter was defined, resolve it as a Go template and use the query for
+ // returning the user's groups
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: Compiling group filter", "group_filter", cfg.GroupFilter)
+ }
+
+ // Parse the configuration as a template.
+ // Example template "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))"
+ t, err := template.New("queryTemplate").Parse(cfg.GroupFilter)
+ if err != nil {
+ return nil, fmt.Errorf("LDAP search failed due to template compilation error: %v", err)
+ }
+
+ // Build context to pass to template - we will be exposing UserDn and Username.
+ context := struct {
+ UserDN string
+ Username string
+ }{
+ ldap.EscapeFilter(userDN),
+ ldap.EscapeFilter(username),
+ }
+
+ var renderedQuery bytes.Buffer
+ t.Execute(&renderedQuery, context)
+
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: Searching", "groupdn", cfg.GroupDN, "rendered_query", renderedQuery.String())
+ }
+
+ result, err := c.Search(&ldap.SearchRequest{
+ BaseDN: cfg.GroupDN,
+ Scope: 2, // subtree
+ Filter: renderedQuery.String(),
+ Attributes: []string{
+ cfg.GroupAttr,
+ },
+ })
+ if err != nil {
+ return nil, fmt.Errorf("LDAP search failed: %v", err)
+ }
+
+ for _, e := range result.Entries {
+ dn, err := ldap.ParseDN(e.DN)
+ if err != nil || len(dn.RDNs) == 0 {
+ continue
+ }
+
+ // Enumerate attributes of each result, parse out CN and add as group
+ values := e.GetAttributeValues(cfg.GroupAttr)
+ if len(values) > 0 {
+ for _, val := range values {
+ groupCN := b.getCN(val)
+ ldapMap[groupCN] = true
+ }
+ } else {
+ // If groupattr didn't resolve, use self (enumerating group objects)
+ groupCN := b.getCN(e.DN)
+ ldapMap[groupCN] = true
+ }
+ }
+
+ ldapGroups := make([]string, 0, len(ldapMap))
+ for key := range ldapMap {
+ ldapGroups = append(ldapGroups, key)
+ }
+
+ return ldapGroups, nil
+}
+
+const backendHelp = `
+The "ldap" credential provider allows authentication querying
+a LDAP server, checking username and password, and associating groups
+to set of policies.
+
+Configuration of the server is done through the "config" and "groups"
+endpoints by a user with root access. Authentication is then done
+by suppying the two fields for "login".
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go
new file mode 100644
index 0000000..51b4df7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go
@@ -0,0 +1,540 @@
+package ldap
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/mitchellh/mapstructure"
+)
+
+func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+
+ b := Backend()
+ if b == nil {
+ t.Fatalf("failed to create backend")
+ }
+
+ _, err := b.Backend.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return b, config.StorageView
+}
+
+func TestLdapAuthBackend_UserPolicies(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ configReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config",
+ Data: map[string]interface{}{
+ // Online LDAP test server
+ // http://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/
+ "url": "ldap://ldap.forumsys.com",
+ "userattr": "uid",
+ "userdn": "dc=example,dc=com",
+ "groupdn": "dc=example,dc=com",
+ "binddn": "cn=read-only-admin,dc=example,dc=com",
+ },
+ Storage: storage,
+ }
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ groupReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Data: map[string]interface{}{
+ "policies": "grouppolicy",
+ },
+ Path: "groups/engineers",
+ Storage: storage,
+ }
+ resp, err = b.HandleRequest(groupReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ userReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Data: map[string]interface{}{
+ "groups": "engineers",
+ "policies": "userpolicy",
+ },
+ Path: "users/tesla",
+ Storage: storage,
+ }
+
+ resp, err = b.HandleRequest(userReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ loginReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "login/tesla",
+ Data: map[string]interface{}{
+ "password": "password",
+ },
+ Storage: storage,
+ }
+
+ resp, err = b.HandleRequest(loginReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ expected := []string{"default", "grouppolicy", "userpolicy"}
+ if !reflect.DeepEqual(expected, resp.Auth.Policies) {
+ t.Fatalf("bad: policies: expected: %q, actual: %q", expected, resp.Auth.Policies)
+ }
+}
+
+/*
+ * Acceptance test for LDAP Auth Backend
+ *
+ * The tests here rely on a public LDAP server:
+ * [http://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/]
+ *
+ * ...as well as existence of a person object, `uid=tesla,dc=example,dc=com`,
+ * which is a member of a group, `ou=scientists,dc=example,dc=com`
+ *
+ * Querying the server from the command line:
+ * $ ldapsearch -x -H ldap://ldap.forumsys.com -b dc=example,dc=com -s sub \
+ * '(&(objectClass=groupOfUniqueNames)(uniqueMember=uid=tesla,dc=example,dc=com))'
+ *
+ * $ ldapsearch -x -H ldap://ldap.forumsys.com -b dc=example,dc=com -s sub uid=tesla
+ */
+func factory(t *testing.T) logical.Backend {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 32
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+ return b
+}
+
+func TestBackend_basic(t *testing.T) {
+ b := factory(t)
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfigUrl(t),
+ // Map Scientists group (from LDAP server) with foo policy
+ testAccStepGroup(t, "Scientists", "foo"),
+
+ // Map engineers group (local) with bar policy
+ testAccStepGroup(t, "engineers", "bar"),
+
+ // Map tesla user with local engineers group
+ testAccStepUser(t, "tesla", "engineers"),
+
+ // Authenticate
+ testAccStepLogin(t, "tesla", "password"),
+
+ // Verify both groups mappings can be listed back
+ testAccStepGroupList(t, []string{"engineers", "Scientists"}),
+
+ // Verify user mapping can be listed back
+ testAccStepUserList(t, []string{"tesla"}),
+ },
+ })
+}
+
+func TestBackend_basic_authbind(t *testing.T) {
+ b := factory(t)
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfigUrlWithAuthBind(t),
+ testAccStepGroup(t, "Scientists", "foo"),
+ testAccStepGroup(t, "engineers", "bar"),
+ testAccStepUser(t, "tesla", "engineers"),
+ testAccStepLogin(t, "tesla", "password"),
+ },
+ })
+}
+
+func TestBackend_basic_discover(t *testing.T) {
+ b := factory(t)
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfigUrlWithDiscover(t),
+ testAccStepGroup(t, "Scientists", "foo"),
+ testAccStepGroup(t, "engineers", "bar"),
+ testAccStepUser(t, "tesla", "engineers"),
+ testAccStepLogin(t, "tesla", "password"),
+ },
+ })
+}
+
+func TestBackend_basic_nogroupdn(t *testing.T) {
+ b := factory(t)
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfigUrlNoGroupDN(t),
+ testAccStepGroup(t, "Scientists", "foo"),
+ testAccStepGroup(t, "engineers", "bar"),
+ testAccStepUser(t, "tesla", "engineers"),
+ testAccStepLoginNoGroupDN(t, "tesla", "password"),
+ },
+ })
+}
+
+func TestBackend_groupCrud(t *testing.T) {
+ b := factory(t)
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepGroup(t, "g1", "foo"),
+ testAccStepReadGroup(t, "g1", "default,foo"),
+ testAccStepDeleteGroup(t, "g1"),
+ testAccStepReadGroup(t, "g1", ""),
+ },
+ })
+}
+
+/*
+ * Test backend configuration defaults are successfully read.
+ */
+func TestBackend_configDefaultsAfterUpdate(t *testing.T) {
+ b := factory(t)
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config",
+ Data: map[string]interface{}{},
+ },
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "config",
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ // Test well-known defaults
+ cfg := resp.Data
+ defaultGroupFilter := "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))"
+ if cfg["groupfilter"] != defaultGroupFilter {
+ t.Errorf("Default mismatch: groupfilter. Expected: '%s', received :'%s'", defaultGroupFilter, cfg["groupfilter"])
+ }
+
+ defaultGroupAttr := "cn"
+ if cfg["groupattr"] != defaultGroupAttr {
+ t.Errorf("Default mismatch: groupattr. Expected: '%s', received :'%s'", defaultGroupAttr, cfg["groupattr"])
+ }
+
+ defaultUserAttr := "cn"
+ if cfg["userattr"] != defaultUserAttr {
+ t.Errorf("Default mismatch: userattr. Expected: '%s', received :'%s'", defaultUserAttr, cfg["userattr"])
+ }
+
+ defaultDenyNullBind := true
+ if cfg["deny_null_bind"] != defaultDenyNullBind {
+ t.Errorf("Default mismatch: deny_null_bind. Expected: '%s', received :'%s'", defaultDenyNullBind, cfg["deny_null_bind"])
+ }
+
+ return nil
+ },
+ },
+ },
+ })
+}
+
+func testAccStepConfigUrl(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config",
+ Data: map[string]interface{}{
+ // Online LDAP test server
+ // http://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/
+ "url": "ldap://ldap.forumsys.com",
+ "userattr": "uid",
+ "userdn": "dc=example,dc=com",
+ "groupdn": "dc=example,dc=com",
+ },
+ }
+}
+
+func testAccStepConfigUrlWithAuthBind(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config",
+ Data: map[string]interface{}{
+ // Online LDAP test server
+ // http://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/
+ // In this test we also exercise multiple URL support
+ "url": "foobar://ldap.example.com,ldap://ldap.forumsys.com",
+ "userattr": "uid",
+ "userdn": "dc=example,dc=com",
+ "groupdn": "dc=example,dc=com",
+ "binddn": "cn=read-only-admin,dc=example,dc=com",
+ "bindpass": "password",
+ },
+ }
+}
+
+func testAccStepConfigUrlWithDiscover(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config",
+ Data: map[string]interface{}{
+ // Online LDAP test server
+ // http://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/
+ "url": "ldap://ldap.forumsys.com",
+ "userattr": "uid",
+ "userdn": "dc=example,dc=com",
+ "groupdn": "dc=example,dc=com",
+ "discoverdn": true,
+ },
+ }
+}
+
+func testAccStepConfigUrlNoGroupDN(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config",
+ Data: map[string]interface{}{
+ // Online LDAP test server
+ // http://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/
+ "url": "ldap://ldap.forumsys.com",
+ "userattr": "uid",
+ "userdn": "dc=example,dc=com",
+ "discoverdn": true,
+ },
+ }
+}
+
+func testAccStepGroup(t *testing.T, group string, policies string) logicaltest.TestStep {
+ t.Logf("[testAccStepGroup] - Registering group %s, policy %s", group, policies)
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "groups/" + group,
+ Data: map[string]interface{}{
+ "policies": policies,
+ },
+ }
+}
+
+func testAccStepReadGroup(t *testing.T, group string, policies string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "groups/" + group,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if policies == "" {
+ return nil
+ }
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var d struct {
+ Policies string `mapstructure:"policies"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.Policies != policies {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepDeleteGroup(t *testing.T, group string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "groups/" + group,
+ }
+}
+
+func TestBackend_userCrud(t *testing.T) {
+ b := Backend()
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepUser(t, "g1", "bar"),
+ testAccStepReadUser(t, "g1", "bar"),
+ testAccStepDeleteUser(t, "g1"),
+ testAccStepReadUser(t, "g1", ""),
+ },
+ })
+}
+
+func testAccStepUser(t *testing.T, user string, groups string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "users/" + user,
+ Data: map[string]interface{}{
+ "groups": groups,
+ },
+ }
+}
+
+func testAccStepReadUser(t *testing.T, user string, groups string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "users/" + user,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if groups == "" {
+ return nil
+ }
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var d struct {
+ Groups string `mapstructure:"groups"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.Groups != groups {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepDeleteUser(t *testing.T, user string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "users/" + user,
+ }
+}
+
+func testAccStepLogin(t *testing.T, user string, pass string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login/" + user,
+ Data: map[string]interface{}{
+ "password": pass,
+ },
+ Unauthenticated: true,
+
+ // Verifies that user tesla maps to groups via the local group (engineers) as well as the remote group (Scientists)
+ Check: logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}),
+ }
+}
+
+func testAccStepLoginNoGroupDN(t *testing.T, user string, pass string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login/" + user,
+ Data: map[string]interface{}{
+ "password": pass,
+ },
+ Unauthenticated: true,
+
+ // Verifies that a search without a defined GroupDN returns a warning rather than failing
+ Check: func(resp *logical.Response) error {
+ if len(resp.Warnings()) != 1 {
+ return fmt.Errorf("expected a warning due to no group dn, got: %#v", resp.Warnings())
+ }
+
+ return logicaltest.TestCheckAuth([]string{"bar", "default"})(resp)
+ },
+ }
+}
+
+func TestLDAPEscape(t *testing.T) {
+ testcases := map[string]string{
+ "#test": "\\#test",
+ "test,hello": "test\\,hello",
+ "test,hel+lo": "test\\,hel\\+lo",
+ "test\\hello": "test\\\\hello",
+ " test ": "\\ test \\ ",
+ }
+
+ for test, answer := range testcases {
+ res := EscapeLDAPValue(test)
+ if res != answer {
+ t.Errorf("Failed to escape %s: %s != %s\n", test, res, answer)
+ }
+ }
+}
+
+func testAccStepGroupList(t *testing.T, groups []string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ListOperation,
+ Path: "groups",
+ Check: func(resp *logical.Response) error {
+ if resp.IsError() {
+ return fmt.Errorf("Got error response: %#v", *resp)
+ }
+
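+ // Sort copies of both slices so ordering differences alone don't fail the comparison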
+ expected := make([]string, len(groups))
+ copy(expected, groups)
+ sort.Strings(expected)
+
+ sortedResponse := make([]string, len(resp.Data["keys"].([]string)))
+ copy(sortedResponse, resp.Data["keys"].([]string))
+ sort.Strings(sortedResponse)
+
+ if !reflect.DeepEqual(expected, sortedResponse) {
+ return fmt.Errorf("expected:\n%#v\ngot:\n%#v\n", expected, sortedResponse)
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepUserList(t *testing.T, users []string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ListOperation,
+ Path: "users",
+ Check: func(resp *logical.Response) error {
+ if resp.IsError() {
+ return fmt.Errorf("Got error response: %#v", *resp)
+ }
+
+ expected := make([]string, len(users))
+ copy(expected, users)
+ sort.Strings(expected)
+
+ sortedResponse := make([]string, len(resp.Data["keys"].([]string)))
+ copy(sortedResponse, resp.Data["keys"].([]string))
+ sort.Strings(sortedResponse)
+
+ if !reflect.DeepEqual(expected, sortedResponse) {
+ return fmt.Errorf("expected:\n%#v\ngot:\n%#v\n", expected, sortedResponse)
+ }
+ return nil
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go
new file mode 100644
index 0000000..e4d151f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go
@@ -0,0 +1,89 @@
+package ldap
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ pwd "github.com/hashicorp/vault/helper/password"
+)
+
+type CLIHandler struct{}
+
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+ mount, ok := m["mount"]
+ if !ok {
+ mount = "ldap"
+ }
+
+ username, ok := m["username"]
+ if !ok {
+ username = usernameFromEnv()
+ if username == "" {
+ return "", fmt.Errorf("'username' not supplied and neither 'LOGNAME' nor 'USER' env vars set")
+ }
+ }
+ password, ok := m["password"]
+ if !ok {
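+ // The password was not passed as an argument; prompt for it without echoing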
+ fmt.Printf("Password (will be hidden): ")
+ var err error
+ password, err = pwd.Read(os.Stdin)
+ fmt.Println()
+ if err != nil {
+ return "", err
+ }
+ }
+
+ data := map[string]interface{}{
+ "password": password,
+ }
+
+ mfa_method, ok := m["method"]
+ if ok {
+ data["method"] = mfa_method
+ }
+ mfa_passcode, ok := m["passcode"]
+ if ok {
+ data["passcode"] = mfa_passcode
+ }
+
+ path := fmt.Sprintf("auth/%s/login/%s", mount, username)
+ secret, err := c.Logical().Write(path, data)
+ if err != nil {
+ return "", err
+ }
+ if secret == nil {
+ return "", fmt.Errorf("empty response from credential provider")
+ }
+
+ return secret.Auth.ClientToken, nil
+}
+
+func (h *CLIHandler) Help() string {
+ help := `
+The LDAP credential provider allows you to authenticate with LDAP.
+To use it, first configure it through the "config" endpoint, and then
+log in by specifying a username and password. If the password is not provided
+on the command line, it will be read from stdin.
+
+If multi-factor authentication (MFA) is enabled, a "method" and/or "passcode"
+may be provided depending on the MFA backend enabled. To check
+which MFA backend is in use, read "auth/[mount]/mfa_config".
+
+ Example: vault auth -method=ldap username=john
+
+ `
+
+ return strings.TrimSpace(help)
+}
+
+func usernameFromEnv() string {
+ if logname := os.Getenv("LOGNAME"); logname != "" {
+ return logname
+ }
+ if user := os.Getenv("USER"); user != "" {
+ return user
+ }
+ return ""
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go
new file mode 100644
index 0000000..4fc772e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go
@@ -0,0 +1,473 @@
+package ldap
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "net"
+ "net/url"
+ "strings"
+ "text/template"
+
+ "github.com/fatih/structs"
+ "github.com/go-ldap/ldap"
+ multierror "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/helper/tlsutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func pathConfig(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `config`,
+ Fields: map[string]*framework.FieldSchema{
+ "url": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "ldap://127.0.0.1",
+ Description: "LDAP URL to connect to (default: ldap://127.0.0.1). Multiple URLs can be specified by concatenating them with commas; they will be tried in-order.",
+ },
+
+ "userdn": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "LDAP domain to use for users (eg: ou=People,dc=example,dc=org)",
+ },
+
+ "binddn": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "LDAP DN for searching for the user DN (optional)",
+ },
+
+ "bindpass": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "LDAP password for searching for the user DN (optional)",
+ },
+
+ "groupdn": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "LDAP search base to use for group membership search (eg: ou=Groups,dc=example,dc=org)",
+ },
+
+ "groupfilter": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))",
+ Description: `Go template for querying group membership of user (optional)
+The template can access the following context variables: UserDN, Username
+Example: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))
+Default: (|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))`,
+ },
+
+ "groupattr": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "cn",
+ Description: `LDAP attribute to follow on objects returned by <groupfilter>
+in order to enumerate user group membership.
+Examples: "cn" or "memberOf", etc.
+Default: cn`,
+ },
+
+ "upndomain": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Enables userPrincipalDomain login with [username]@UPNDomain (optional)",
+ },
+
+ "userattr": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "cn",
+ Description: "Attribute used for users (default: cn)",
+ },
+
+ "certificate": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "CA certificate to use when verifying LDAP server certificate, must be x509 PEM encoded (optional)",
+ },
+
+ "discoverdn": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: "Use anonymous bind to discover the bind DN of a user (optional)",
+ },
+
+ "insecure_tls": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: "Skip LDAP server SSL Certificate verification - VERY insecure (optional)",
+ },
+
+ "starttls": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: "Issue a StartTLS command after establishing unencrypted connection (optional)",
+ },
+
+ "tls_min_version": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "tls12",
+ Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
+ },
+
+ "tls_max_version": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "tls12",
+ Description: "Maximum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
+ },
+ "deny_null_bind": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: "Denies an unauthenticated LDAP bind request if the user's password is empty; defaults to true",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathConfigRead,
+ logical.UpdateOperation: b.pathConfigWrite,
+ },
+
+ HelpSynopsis: pathConfigHelpSyn,
+ HelpDescription: pathConfigHelpDesc,
+ }
+}
+
+/*
+ * Construct ConfigEntry struct using stored configuration.
+ */
+func (b *backend) Config(req *logical.Request) (*ConfigEntry, error) {
+ // Schema for ConfigEntry
+ fd, err := b.getConfigFieldData()
+ if err != nil {
+ return nil, err
+ }
+
+ // Create a new ConfigEntry, filling in defaults where appropriate
+ result, err := b.newConfigEntry(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ storedConfig, err := req.Storage.Get("config")
+ if err != nil {
+ return nil, err
+ }
+
+ if storedConfig == nil {
+ // No user overrides, return default configuration
+ return result, nil
+ }
+
+ // Deserialize stored configuration.
+ // Fields not specified in storedConfig will retain their defaults.
+ if err := storedConfig.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ result.logger = b.Logger()
+
+ return result, nil
+}
+
+func (b *backend) pathConfigRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ cfg, err := b.Config(req)
+ if err != nil {
+ return nil, err
+ }
+ if cfg == nil {
+ return nil, nil
+ }
+
+ resp := &logical.Response{
+ Data: structs.New(cfg).Map(),
+ }
+ resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the configuration information as-is, including any passwords.")
+ return resp, nil
+}
+
+/*
+ * Creates and initializes a ConfigEntry object with its default values,
+ * as specified by the passed schema.
+ */
+func (b *backend) newConfigEntry(d *framework.FieldData) (*ConfigEntry, error) {
+ cfg := new(ConfigEntry)
+
+ cfg.logger = b.Logger()
+
+ url := d.Get("url").(string)
+ if url != "" {
+ cfg.Url = strings.ToLower(url)
+ }
+ userattr := d.Get("userattr").(string)
+ if userattr != "" {
+ cfg.UserAttr = strings.ToLower(userattr)
+ }
+ userdn := d.Get("userdn").(string)
+ if userdn != "" {
+ cfg.UserDN = userdn
+ }
+ groupdn := d.Get("groupdn").(string)
+ if groupdn != "" {
+ cfg.GroupDN = groupdn
+ }
+ groupfilter := d.Get("groupfilter").(string)
+ if groupfilter != "" {
+ // Validate the template before proceeding
+ _, err := template.New("queryTemplate").Parse(groupfilter)
+ if err != nil {
+ return nil, fmt.Errorf("invalid groupfilter (%v)", err)
+ }
+
+ cfg.GroupFilter = groupfilter
+ }
+ groupattr := d.Get("groupattr").(string)
+ if groupattr != "" {
+ cfg.GroupAttr = groupattr
+ }
+ upndomain := d.Get("upndomain").(string)
+ if upndomain != "" {
+ cfg.UPNDomain = upndomain
+ }
+ certificate := d.Get("certificate").(string)
+ if certificate != "" {
+ cfg.Certificate = certificate
+ }
+ insecureTLS := d.Get("insecure_tls").(bool)
+ if insecureTLS {
+ cfg.InsecureTLS = insecureTLS
+ }
+ cfg.TLSMinVersion = d.Get("tls_min_version").(string)
+ if cfg.TLSMinVersion == "" {
+ return nil, fmt.Errorf("failed to get 'tls_min_version' value")
+ }
+
+ var ok bool
+ _, ok = tlsutil.TLSLookup[cfg.TLSMinVersion]
+ if !ok {
+ return nil, fmt.Errorf("invalid 'tls_min_version'")
+ }
+
+ cfg.TLSMaxVersion = d.Get("tls_max_version").(string)
+ if cfg.TLSMaxVersion == "" {
+ return nil, fmt.Errorf("failed to get 'tls_max_version' value")
+ }
+
+ _, ok = tlsutil.TLSLookup[cfg.TLSMaxVersion]
+ if !ok {
+ return nil, fmt.Errorf("invalid 'tls_max_version'")
+ }
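+ // A plain string comparison is sufficient here because the accepted
+ // values ("tls10", "tls11", "tls12") sort lexically in version order.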
+ if cfg.TLSMaxVersion < cfg.TLSMinVersion {
+ return nil, fmt.Errorf("'tls_max_version' must be greater than or equal to 'tls_min_version'")
+ }
+
+ startTLS := d.Get("starttls").(bool)
+ if startTLS {
+ cfg.StartTLS = startTLS
+ }
+ bindDN := d.Get("binddn").(string)
+ if bindDN != "" {
+ cfg.BindDN = bindDN
+ }
+ bindPass := d.Get("bindpass").(string)
+ if bindPass != "" {
+ cfg.BindPassword = bindPass
+ }
+ denyNullBind := d.Get("deny_null_bind").(bool)
+ if denyNullBind {
+ cfg.DenyNullBind = denyNullBind
+ }
+ discoverDN := d.Get("discoverdn").(bool)
+ if discoverDN {
+ cfg.DiscoverDN = discoverDN
+ }
+
+ return cfg, nil
+}
+
+func (b *backend) pathConfigWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ // Build a ConfigEntry struct out of the supplied FieldData
+ cfg, err := b.newConfigEntry(d)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ entry, err := logical.StorageEntryJSON("config", cfg)
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+type ConfigEntry struct {
+ logger log.Logger
+ Url string `json:"url" structs:"url" mapstructure:"url"`
+ UserDN string `json:"userdn" structs:"userdn" mapstructure:"userdn"`
+ GroupDN string `json:"groupdn" structs:"groupdn" mapstructure:"groupdn"`
+ GroupFilter string `json:"groupfilter" structs:"groupfilter" mapstructure:"groupfilter"`
+ GroupAttr string `json:"groupattr" structs:"groupattr" mapstructure:"groupattr"`
+ UPNDomain string `json:"upndomain" structs:"upndomain" mapstructure:"upndomain"`
+ UserAttr string `json:"userattr" structs:"userattr" mapstructure:"userattr"`
+ Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"`
+ InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"`
+ StartTLS bool `json:"starttls" structs:"starttls" mapstructure:"starttls"`
+ BindDN string `json:"binddn" structs:"binddn" mapstructure:"binddn"`
+ BindPassword string `json:"bindpass" structs:"bindpass" mapstructure:"bindpass"`
+ DenyNullBind bool `json:"deny_null_bind" structs:"deny_null_bind" mapstructure:"deny_null_bind"`
+ DiscoverDN bool `json:"discoverdn" structs:"discoverdn" mapstructure:"discoverdn"`
+ TLSMinVersion string `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"`
+ TLSMaxVersion string `json:"tls_max_version" structs:"tls_max_version" mapstructure:"tls_max_version"`
+}
+
+func (c *ConfigEntry) GetTLSConfig(host string) (*tls.Config, error) {
+ tlsConfig := &tls.Config{
+ ServerName: host,
+ }
+
+ if c.TLSMinVersion != "" {
+ tlsMinVersion, ok := tlsutil.TLSLookup[c.TLSMinVersion]
+ if !ok {
+ return nil, fmt.Errorf("invalid 'tls_min_version' in config")
+ }
+ tlsConfig.MinVersion = tlsMinVersion
+ }
+
+ if c.TLSMaxVersion != "" {
+ tlsMaxVersion, ok := tlsutil.TLSLookup[c.TLSMaxVersion]
+ if !ok {
+ return nil, fmt.Errorf("invalid 'tls_max_version' in config")
+ }
+ tlsConfig.MaxVersion = tlsMaxVersion
+ }
+
+ if c.InsecureTLS {
+ tlsConfig.InsecureSkipVerify = true
+ }
+ if c.Certificate != "" {
+ caPool := x509.NewCertPool()
+ ok := caPool.AppendCertsFromPEM([]byte(c.Certificate))
+ if !ok {
+ return nil, fmt.Errorf("could not append CA certificate")
+ }
+ tlsConfig.RootCAs = caPool
+ }
+ return tlsConfig, nil
+}
+
+func (c *ConfigEntry) DialLDAP() (*ldap.Conn, error) {
+ var retErr *multierror.Error
+ var conn *ldap.Conn
+ urls := strings.Split(c.Url, ",")
+ for _, uut := range urls {
+ u, err := url.Parse(uut)
+ if err != nil {
+ retErr = multierror.Append(retErr, fmt.Errorf("error parsing url %q: %s", uut, err.Error()))
+ continue
+ }
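+ // SplitHostPort errors when the URL carries no port; fall back to the
+ // bare host and let the scheme-specific default port apply below.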
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ host = u.Host
+ }
+
+ var tlsConfig *tls.Config
+ switch u.Scheme {
+ case "ldap":
+ if port == "" {
+ port = "389"
+ }
+ conn, err = ldap.Dial("tcp", net.JoinHostPort(host, port))
+ if err != nil {
+ break
+ }
+ if conn == nil {
+ err = fmt.Errorf("empty connection after dialing")
+ break
+ }
+ if c.StartTLS {
+ tlsConfig, err = c.GetTLSConfig(host)
+ if err != nil {
+ break
+ }
+ err = conn.StartTLS(tlsConfig)
+ }
+ case "ldaps":
+ if port == "" {
+ port = "636"
+ }
+ tlsConfig, err = c.GetTLSConfig(host)
+ if err != nil {
+ break
+ }
+ conn, err = ldap.DialTLS("tcp", net.JoinHostPort(host, port), tlsConfig)
+ default:
+ retErr = multierror.Append(retErr, fmt.Errorf("invalid LDAP scheme in url %q", net.JoinHostPort(host, port)))
+ continue
+ }
+ if err == nil {
+ if retErr != nil {
+ if c.logger.IsDebug() {
+ c.logger.Debug("ldap: errors connecting to some hosts: %s", retErr.Error())
+ }
+ }
+ retErr = nil
+ break
+ }
+ retErr = multierror.Append(retErr, fmt.Errorf("error connecting to host %q: %s", uut, err.Error()))
+ }
+
+ return conn, retErr.ErrorOrNil()
+}
+
+/*
+ * Returns FieldData describing our ConfigEntry struct schema
+ */
+func (b *backend) getConfigFieldData() (*framework.FieldData, error) {
+ configPath := b.Route("config")
+
+ if configPath == nil {
+ return nil, logical.ErrUnsupportedPath
+ }
+
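+ // Leaving Raw empty makes FieldData.Get fall back to each field's schema
+ // default, yielding a ConfigEntry populated entirely with defaults.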
+ raw := make(map[string]interface{}, len(configPath.Fields))
+
+ fd := framework.FieldData{
+ Raw: raw,
+ Schema: configPath.Fields,
+ }
+
+ return &fd, nil
+}
+
+const pathConfigHelpSyn = `
+Configure the LDAP server to connect to, along with its options.
+`
+
+const pathConfigHelpDesc = `
+This endpoint allows you to configure the LDAP server to connect to and its
+configuration options.
+
+The LDAP URL can use either the "ldap://" or "ldaps://" scheme. In the former
+case, an unencrypted connection will be made with a default port of 389, unless
+the "starttls" parameter is set to true, in which case TLS will be used. In the
+latter case, an SSL connection will be established with a default port of 636.
+
+## A NOTE ON ESCAPING
+
+It is up to the administrator to provide properly escaped DNs. This includes
+the user DN, bind DN for search, and so on.
+
+The only DN escaping performed by this backend is on usernames given at login
+time when they are inserted into the final bind DN, and uses escaping rules
+defined in RFC 4514.
+
+Additionally, Active Directory has escaping rules that differ slightly from the
+RFC; in particular it requires escaping of '#' regardless of position in the DN
+(the RFC only requires it to be escaped when it is the first character), and
+'=', which the RFC indicates can be escaped with a backslash, but does not
+contain in its set of required escapes. If you are using Active Directory and
+these appear in your usernames, please ensure that they are escaped, in
+addition to being properly escaped in your configured DNs.
+
+For reference, see https://www.ietf.org/rfc/rfc4514.txt and
+http://social.technet.microsoft.com/wiki/contents/articles/5312.active-directory-characters-to-escape.aspx
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go
new file mode 100644
index 0000000..998fdc4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go
@@ -0,0 +1,135 @@
+package ldap
+
+import (
+ "strings"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathGroupsList(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "groups/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathGroupList,
+ },
+
+ HelpSynopsis: pathGroupHelpSyn,
+ HelpDescription: pathGroupHelpDesc,
+ }
+}
+
+func pathGroups(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `groups/(?P<name>.+)`,
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the LDAP group.",
+ },
+
+ "policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Comma-separated list of policies associated to the group.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.DeleteOperation: b.pathGroupDelete,
+ logical.ReadOperation: b.pathGroupRead,
+ logical.UpdateOperation: b.pathGroupWrite,
+ },
+
+ HelpSynopsis: pathGroupHelpSyn,
+ HelpDescription: pathGroupHelpDesc,
+ }
+}
+
+func (b *backend) Group(s logical.Storage, n string) (*GroupEntry, error) {
+ entry, err := s.Get("group/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result GroupEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathGroupDelete(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("group/" + d.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathGroupRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ group, err := b.Group(req.Storage, d.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+ if group == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "policies": strings.Join(group.Policies, ","),
+ },
+ }, nil
+}
+
+func (b *backend) pathGroupWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Store it
+ entry, err := logical.StorageEntryJSON("group/"+d.Get("name").(string), &GroupEntry{
+ Policies: policyutil.ParsePolicies(d.Get("policies").(string)),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathGroupList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ groups, err := req.Storage.List("group/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(groups), nil
+}
+
+type GroupEntry struct {
+ Policies []string
+}
+
+const pathGroupHelpSyn = `
+Manage users allowed to authenticate.
+`
+
+const pathGroupHelpDesc = `
+This endpoint allows you to create, read, update, and delete configuration
+for LDAP groups that are allowed to authenticate, and associate policies to
+them.
+
+Deleting a group will not revoke auth for prior authenticated users in that
+group. To do this, do a revoke on "login/<username>" for
+the usernames you want revoked.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go
new file mode 100644
index 0000000..e859adb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go
@@ -0,0 +1,100 @@
+package ldap
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathLogin(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `login/(?P<username>.+)`,
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "DN (distinguished name) to be used for login.",
+ },
+
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Password for this user.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathLogin,
+ },
+
+ HelpSynopsis: pathLoginSyn,
+ HelpDescription: pathLoginDesc,
+ }
+}
+
+func (b *backend) pathLogin(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ username := d.Get("username").(string)
+ password := d.Get("password").(string)
+
+ policies, resp, err := b.Login(req, username, password)
+ // Handle an internal error
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ // Handle a logical error
+ if resp.IsError() {
+ return resp, nil
+ }
+ } else {
+ resp = &logical.Response{}
+ }
+
+ sort.Strings(policies)
+
+ resp.Auth = &logical.Auth{
+ Policies: policies,
+ Metadata: map[string]string{
+ "username": username,
+ "policies": strings.Join(policies, ","),
+ },
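+ // The password is kept in InternalData (never returned to the client)
+ // so pathLoginRenew can re-authenticate against LDAP on renewal.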
+ InternalData: map[string]interface{}{
+ "password": password,
+ },
+ DisplayName: username,
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ },
+ }
+ return resp, nil
+}
+
+func (b *backend) pathLoginRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ username := req.Auth.Metadata["username"]
+ password := req.Auth.InternalData["password"].(string)
+
+ loginPolicies, resp, err := b.Login(req, username, password)
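+ // If no policies came back the login did not succeed; return the
+ // response and error unchanged.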
+ if len(loginPolicies) == 0 {
+ return resp, err
+ }
+
+ if !policyutil.EquivalentPolicies(loginPolicies, req.Auth.Policies) {
+ return nil, fmt.Errorf("policies have changed, not renewing")
+ }
+
+ return framework.LeaseExtend(0, 0, b.System())(req, d)
+}
+
+const pathLoginSyn = `
+Log in with a username and password.
+`
+
+const pathLoginDesc = `
+This endpoint authenticates using a username and password. Please be sure to
+read the note on escaping from the path-help for the 'config' endpoint.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go
new file mode 100644
index 0000000..605f779
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go
@@ -0,0 +1,150 @@
+package ldap
+
+import (
+ "strings"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathUsersList(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "users/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathUserList,
+ },
+
+ HelpSynopsis: pathUserHelpSyn,
+ HelpDescription: pathUserHelpDesc,
+ }
+}
+
+func pathUsers(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `users/(?P<name>.+)`,
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the LDAP user.",
+ },
+
+ "groups": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Comma-separated list of additional groups associated with the user.",
+ },
+
+ "policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Comma-separated list of policies associated with the user.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.DeleteOperation: b.pathUserDelete,
+ logical.ReadOperation: b.pathUserRead,
+ logical.UpdateOperation: b.pathUserWrite,
+ },
+
+ HelpSynopsis: pathUserHelpSyn,
+ HelpDescription: pathUserHelpDesc,
+ }
+}
+
+func (b *backend) User(s logical.Storage, n string) (*UserEntry, error) {
+ entry, err := s.Get("user/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result UserEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathUserDelete(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("user/" + d.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathUserRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ user, err := b.User(req.Storage, d.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+ if user == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "groups": strings.Join(user.Groups, ","),
+ "policies": strings.Join(user.Policies, ","),
+ },
+ }, nil
+}
+
+func (b *backend) pathUserWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ groups := strutil.RemoveDuplicates(strutil.ParseStringSlice(d.Get("groups").(string), ","), false)
+ policies := policyutil.ParsePolicies(d.Get("policies").(string))
+ for i, g := range groups {
+ groups[i] = strings.TrimSpace(g)
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("user/"+name, &UserEntry{
+ Groups: groups,
+ Policies: policies,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathUserList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ users, err := req.Storage.List("user/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(users), nil
+}
+
+type UserEntry struct {
+ Groups []string
+ Policies []string
+}
+
+const pathUserHelpSyn = `
+Manage additional groups for users allowed to authenticate.
+`
+
+const pathUserHelpDesc = `
+This endpoint allows you to create, read, update, and delete configuration
+for LDAP users that are allowed to authenticate, in particular associating
+additional groups to them.
+
+Deleting a user will not revoke their auth. To do this, do a revoke on "login/<username>" for
+the usernames you want revoked.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go
new file mode 100644
index 0000000..43a1647
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go
@@ -0,0 +1,143 @@
+package okta
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: backendHelp,
+
+ PathsSpecial: &logical.Paths{
+ Unauthenticated: []string{
+ "login/*",
+ },
+ },
+
+ Paths: []*framework.Path{
+ pathConfig(&b),
+ pathUsers(&b),
+ pathGroups(&b),
+ pathUsersList(&b),
+ pathGroupsList(&b),
+ pathLogin(&b),
+ },
+
+ AuthRenew: b.pathLoginRenew,
+ }
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+}
+
+func (b *backend) Login(req *logical.Request, username string, password string) ([]string, *logical.Response, error) {
+ cfg, err := b.Config(req.Storage)
+ if err != nil {
+ return nil, nil, err
+ }
+ if cfg == nil {
+ return nil, logical.ErrorResponse("Okta backend not configured"), nil
+ }
+
+ client := cfg.OktaClient()
+ auth, err := client.Authenticate(username, password)
+ if err != nil {
+ return nil, logical.ErrorResponse(fmt.Sprintf("Okta auth failed: %v", err)), nil
+ }
+ if auth == nil {
+ return nil, logical.ErrorResponse("okta auth backend unexpected failure"), nil
+ }
+
+ oktaGroups, err := b.getOktaGroups(cfg, auth.Embedded.User.ID)
+ if err != nil {
+ return nil, logical.ErrorResponse(err.Error()), nil
+ }
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/okta: Groups fetched from Okta", "num_groups", len(oktaGroups), "groups", oktaGroups)
+ }
+
+ oktaResponse := &logical.Response{
+ Data: map[string]interface{}{},
+ }
+ if len(oktaGroups) == 0 {
+ errString := "no Okta groups found; only policies from locally-defined groups available"
+ oktaResponse.AddWarning(errString)
+ }
+
+ var allGroups []string
+ // Import the custom added groups from okta backend
+ user, err := b.User(req.Storage, username)
+ if err == nil && user != nil && user.Groups != nil {
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/okta: adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups)
+ }
+ allGroups = append(allGroups, user.Groups...)
+ }
+ // Merge local and Okta groups
+ allGroups = append(allGroups, oktaGroups...)
+
+ // Retrieve policies
+ var policies []string
+ for _, groupName := range allGroups {
+ group, err := b.Group(req.Storage, groupName)
+ if err == nil && group != nil && group.Policies != nil {
+ policies = append(policies, group.Policies...)
+ }
+ }
+
+ // Merge local Policies into Okta Policies
+ if user != nil && user.Policies != nil {
+ policies = append(policies, user.Policies...)
+ }
+
+ if len(policies) == 0 {
+ errStr := "user is not a member of any authorized policy"
+ if len(oktaResponse.Warnings()) > 0 {
+ errStr = fmt.Sprintf("%s; additionally, %s", errStr, oktaResponse.Warnings()[0])
+ }
+
+ oktaResponse.Data["error"] = errStr
+ return nil, oktaResponse, nil
+ }
+
+ return policies, oktaResponse, nil
+}
+
+func (b *backend) getOktaGroups(cfg *ConfigEntry, userID string) ([]string, error) {
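+ // Group membership can only be queried from Okta when an admin API token
+ // is configured; without one, only locally-defined groups apply.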
+ if cfg.Token != "" {
+ client := cfg.OktaClient()
+ groups, err := client.Groups(userID)
+ if err != nil {
+ return nil, err
+ }
+
+ oktaGroups := make([]string, 0, len(*groups))
+ for _, group := range *groups {
+ oktaGroups = append(oktaGroups, group.Profile.Name)
+ }
+ return oktaGroups, err
+ }
+ return nil, nil
+}
+
+const backendHelp = `
+The Okta credential provider allows authentication by checking a username
+and password against Okta and associating policies. If an API token is
+configured, groups are also pulled down from Okta.
+
+Configuration of the connection is done through the "config" and "policies"
+endpoints by a user with root access. Authentication is then done
+by supplying the two fields for "login".
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go
new file mode 100644
index 0000000..7672dc0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go
@@ -0,0 +1,174 @@
+package okta
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/helper/policyutil"
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+)
+
+func TestBackend_Config(t *testing.T) {
+ b, err := Factory(&logical.BackendConfig{
+ Logger: logformat.NewVaultLogger(log.LevelTrace),
+ System: &logical.StaticSystemView{},
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ username := os.Getenv("OKTA_USERNAME")
+ password := os.Getenv("OKTA_PASSWORD")
+
+ configData := map[string]interface{}{
+ "organization": os.Getenv("OKTA_ORG"),
+ "base_url": "oktapreview.com",
+ }
+
+ configDataToken := map[string]interface{}{
+ "token": os.Getenv("OKTA_API_TOKEN"),
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ PreCheck: func() { testAccPreCheck(t) },
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testConfigCreate(t, configData),
+ testLoginWrite(t, username, "wrong", "E0000004", nil),
+ testLoginWrite(t, username, password, "user is not a member of any authorized policy", nil),
+ testAccUserGroups(t, username, "local_group,local_group2"),
+ testAccGroups(t, "local_group", "local_group_policy"),
+ testLoginWrite(t, username, password, "", []string{"local_group_policy"}),
+ testAccGroups(t, "Everyone", "everyone_group_policy,every_group_policy2"),
+ testLoginWrite(t, username, password, "", []string{"local_group_policy"}),
+ testConfigUpdate(t, configDataToken),
+ testConfigRead(t, configData),
+ testLoginWrite(t, username, password, "", []string{"everyone_group_policy", "every_group_policy2", "local_group_policy"}),
+ testAccGroups(t, "local_group2", "testgroup_group_policy"),
+ testLoginWrite(t, username, password, "", []string{"everyone_group_policy", "every_group_policy2", "local_group_policy", "testgroup_group_policy"}),
+ },
+ })
+}
+
+func testLoginWrite(t *testing.T, username, password, reason string, policies []string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login/" + username,
+ ErrorOk: true,
+ Data: map[string]interface{}{
+ "password": password,
+ },
+ Check: func(resp *logical.Response) error {
+ if resp.IsError() {
+ if reason == "" || !strings.Contains(resp.Error().Error(), reason) {
+ return resp.Error()
+ }
+ }
+
+ if resp.Auth != nil {
+ if !policyutil.EquivalentPolicies(resp.Auth.Policies, policies) {
+ return fmt.Errorf("policy mismatch expected %v but got %v", policies, resp.Auth.Policies)
+ }
+ }
+
+ return nil
+ },
+ }
+}
+
+func testConfigCreate(t *testing.T, d map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.CreateOperation,
+ Path: "config",
+ Data: d,
+ }
+}
+
+func testConfigUpdate(t *testing.T, d map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config",
+ Data: d,
+ }
+}
+
+func testConfigRead(t *testing.T, d map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "config",
+ Check: func(resp *logical.Response) error {
+ if resp.IsError() {
+ return resp.Error()
+ }
+
+ if resp.Data["Org"] != d["organization"] {
+ return fmt.Errorf("Org mismatch expected %s but got %s", d["organization"], resp.Data["Org"])
+ }
+
+ if resp.Data["BaseURL"] != d["base_url"] {
+ return fmt.Errorf("BaseURL mismatch expected %s but got %s", d["base_url"], resp.Data["BaseURL"])
+ }
+
+ if _, exists := resp.Data["Token"]; exists {
+ return fmt.Errorf("token should not be returned on a read request")
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("OKTA_USERNAME"); v == "" {
+ t.Fatal("OKTA_USERNAME must be set for acceptance tests")
+ }
+
+ if v := os.Getenv("OKTA_PASSWORD"); v == "" {
+ t.Fatal("OKTA_PASSWORD must be set for acceptance tests")
+ }
+
+ if v := os.Getenv("OKTA_ORG"); v == "" {
+ t.Fatal("OKTA_ORG must be set for acceptance tests")
+ }
+}
+
+func testAccUserGroups(t *testing.T, user string, groups string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "users/" + user,
+ Data: map[string]interface{}{
+ "groups": groups,
+ },
+ }
+}
+
+func testAccGroups(t *testing.T, group string, policies string) logicaltest.TestStep {
+ t.Logf("[testAccGroups] - Registering group %s, policy %s", group, policies)
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "groups/" + group,
+ Data: map[string]interface{}{
+ "policies": policies,
+ },
+ }
+}
+
+func testAccLogin(t *testing.T, user, password string, keys []string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login/" + user,
+ Data: map[string]interface{}{
+ "password": password,
+ },
+ Unauthenticated: true,
+
+ Check: logicaltest.TestCheckAuth(keys),
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go
new file mode 100644
index 0000000..355e8cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go
@@ -0,0 +1,66 @@
+package okta
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ pwd "github.com/hashicorp/vault/helper/password"
+)
+
+// CLIHandler struct
+type CLIHandler struct{}
+
+// Auth cli method
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+ mount, ok := m["mount"]
+ if !ok {
+ mount = "okta"
+ }
+
+ username, ok := m["username"]
+ if !ok {
+ return "", fmt.Errorf("'username' var must be set")
+ }
+ password, ok := m["password"]
+ if !ok {
+ fmt.Printf("Password (will be hidden): ")
+ var err error
+ password, err = pwd.Read(os.Stdin)
+ fmt.Println()
+ if err != nil {
+ return "", err
+ }
+ }
+
+ data := map[string]interface{}{
+ "password": password,
+ }
+
+ path := fmt.Sprintf("auth/%s/login/%s", mount, username)
+ secret, err := c.Logical().Write(path, data)
+ if err != nil {
+ return "", err
+ }
+ if secret == nil {
+ return "", fmt.Errorf("empty response from credential provider")
+ }
+
+ return secret.Auth.ClientToken, nil
+}
+
+// Help method for okta cli
+func (h *CLIHandler) Help() string {
+ help := `
+The Okta credential provider allows you to authenticate with Okta.
+To use it, first configure it through the "config" endpoint, and then
+log in by specifying a username and password. If the password is not provided
+on the command line, it will be read from stdin.
+
+ Example: vault auth -method=okta username=john
+
+ `
+
+ return strings.TrimSpace(help)
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go
new file mode 100644
index 0000000..b454f7e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go
@@ -0,0 +1,169 @@
+package okta
+
+import (
+ "fmt"
+ "net/url"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/sstarcher/go-okta"
+)
+
+func pathConfig(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `config`,
+ Fields: map[string]*framework.FieldSchema{
+ "organization": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Okta organization to authenticate against",
+ },
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Okta admin API token",
+ },
+ "base_url": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The API endpoint to use. Useful if you
+are using Okta development accounts.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathConfigRead,
+ logical.CreateOperation: b.pathConfigWrite,
+ logical.UpdateOperation: b.pathConfigWrite,
+ },
+
+ ExistenceCheck: b.pathConfigExistenceCheck,
+
+ HelpSynopsis: pathConfigHelp,
+ }
+}
+
+// Config returns the configuration for this backend.
+func (b *backend) Config(s logical.Storage) (*ConfigEntry, error) {
+ entry, err := s.Get("config")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result ConfigEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathConfigRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ cfg, err := b.Config(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if cfg == nil {
+ return nil, nil
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "Org": cfg.Org,
+ "BaseURL": cfg.BaseURL,
+ },
+ }
+
+ return resp, nil
+}
+
+func (b *backend) pathConfigWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ org := d.Get("organization").(string)
+ cfg, err := b.Config(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Due to the existence check, entry will only be nil if it's a create
+ // operation, so just create a new one
+ if cfg == nil {
+ cfg = &ConfigEntry{
+ Org: org,
+ }
+ }
+
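+ // GetOk distinguishes a field that was omitted from one explicitly set to
+ // its zero value; on create, fall back to the schema default when omitted.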
+ token, ok := d.GetOk("token")
+ if ok {
+ cfg.Token = token.(string)
+ } else if req.Operation == logical.CreateOperation {
+ cfg.Token = d.Get("token").(string)
+ }
+
+ baseURL, ok := d.GetOk("base_url")
+ if ok {
+ baseURLString := baseURL.(string)
+ if len(baseURLString) != 0 {
+ _, err = url.Parse(baseURLString)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Error parsing given base_url: %s", err)), nil
+ }
+ cfg.BaseURL = baseURLString
+ }
+ } else if req.Operation == logical.CreateOperation {
+ cfg.BaseURL = d.Get("base_url").(string)
+ }
+
+ jsonCfg, err := logical.StorageEntryJSON("config", cfg)
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(jsonCfg); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathConfigExistenceCheck(
+ req *logical.Request, d *framework.FieldData) (bool, error) {
+ cfg, err := b.Config(req.Storage)
+ if err != nil {
+ return false, err
+ }
+
+ return cfg != nil, nil
+}
+
+// OktaClient creates a basic okta client connection
+func (c *ConfigEntry) OktaClient() *okta.Client {
+ client := okta.NewClient(c.Org)
+ if c.BaseURL != "" {
+ client.Url = c.BaseURL
+ }
+
+ if c.Token != "" {
+ client.ApiToken = c.Token
+ }
+
+ return client
+}
+
+// ConfigEntry for Okta
+type ConfigEntry struct {
+ Org string `json:"organization"`
+ Token string `json:"token"`
+ BaseURL string `json:"base_url"`
+}
+
+const pathConfigHelp = `
+This endpoint allows you to configure the connection to Okta and its
+options.
+
+The Okta organization is the subdomain at the front of your Okta URL,
+for example: https://ORG.okta.com
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go
new file mode 100644
index 0000000..d111775
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go
@@ -0,0 +1,147 @@
+package okta
+
+import (
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathGroupsList(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "groups/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathGroupList,
+ },
+
+ HelpSynopsis: pathGroupHelpSyn,
+ HelpDescription: pathGroupHelpDesc,
+ }
+}
+
+func pathGroups(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `groups/(?P<name>.+)`,
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the Okta group.",
+ },
+
+ "policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Comma-separated list of policies associated to the group.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.DeleteOperation: b.pathGroupDelete,
+ logical.ReadOperation: b.pathGroupRead,
+ logical.UpdateOperation: b.pathGroupWrite,
+ },
+
+ HelpSynopsis: pathGroupHelpSyn,
+ HelpDescription: pathGroupHelpDesc,
+ }
+}
+
+func (b *backend) Group(s logical.Storage, n string) (*GroupEntry, error) {
+ entry, err := s.Get("group/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result GroupEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathGroupDelete(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ if len(name) == 0 {
+ return logical.ErrorResponse("Error empty name"), nil
+ }
+
+ err := req.Storage.Delete("group/" + name)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathGroupRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ if len(name) == 0 {
+ return logical.ErrorResponse("Error empty name"), nil
+ }
+
+ group, err := b.Group(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if group == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "policies": group.Policies,
+ },
+ }, nil
+}
+
+func (b *backend) pathGroupWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ if len(name) == 0 {
+ return logical.ErrorResponse("Error empty name"), nil
+ }
+
+ entry, err := logical.StorageEntryJSON("group/"+name, &GroupEntry{
+ Policies: policyutil.ParsePolicies(d.Get("policies").(string)),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathGroupList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ groups, err := req.Storage.List("group/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(groups), nil
+}
+
+type GroupEntry struct {
+ Policies []string
+}
+
+const pathGroupHelpSyn = `
+Manage users allowed to authenticate.
+`
+
+const pathGroupHelpDesc = `
+This endpoint allows you to create, read, update, and delete configuration
+for Okta groups that are allowed to authenticate, and associate policies to
+them.
+
+Deleting a group will not revoke auth for prior authenticated users in that
+group. To do this, do a revoke on "login/<username>" for
+the usernames you want revoked.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go
new file mode 100644
index 0000000..accc867
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go
@@ -0,0 +1,99 @@
+package okta
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathLogin(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `login/(?P<username>.+)`,
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username to be used for login.",
+ },
+
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Password for this user.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathLogin,
+ },
+
+ HelpSynopsis: pathLoginSyn,
+ HelpDescription: pathLoginDesc,
+ }
+}
+
+func (b *backend) pathLogin(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ username := d.Get("username").(string)
+ password := d.Get("password").(string)
+
+ policies, resp, err := b.Login(req, username, password)
+ // Handle an internal error
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ // Handle a logical error
+ if resp.IsError() {
+ return resp, nil
+ }
+ } else {
+ resp = &logical.Response{}
+ }
+
+ sort.Strings(policies)
+
+ resp.Auth = &logical.Auth{
+ Policies: policies,
+ Metadata: map[string]string{
+ "username": username,
+ "policies": strings.Join(policies, ","),
+ },
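+ // The password is kept in InternalData (never returned to the client)
+ // so pathLoginRenew can re-authenticate against Okta on renewal.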
+ InternalData: map[string]interface{}{
+ "password": password,
+ },
+ DisplayName: username,
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ },
+ }
+ return resp, nil
+}
+
+func (b *backend) pathLoginRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ username := req.Auth.Metadata["username"]
+ password := req.Auth.InternalData["password"].(string)
+
+ loginPolicies, resp, err := b.Login(req, username, password)
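+ // If no policies came back the login did not succeed; return the
+ // response and error unchanged.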
+ if len(loginPolicies) == 0 {
+ return resp, err
+ }
+
+ if !policyutil.EquivalentPolicies(loginPolicies, req.Auth.Policies) {
+ return nil, fmt.Errorf("policies have changed, not renewing")
+ }
+
+ return framework.LeaseExtend(0, 0, b.System())(req, d)
+}
+
+const pathLoginSyn = `
+Log in with a username and password.
+`
+
+const pathLoginDesc = `
+This endpoint authenticates using a username and password.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_users.go
new file mode 100644
index 0000000..1512da4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_users.go
@@ -0,0 +1,166 @@
+package okta
+
+import (
+ "strings"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathUsersList(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "users/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathUserList,
+ },
+
+ HelpSynopsis: pathUserHelpSyn,
+ HelpDescription: pathUserHelpDesc,
+ }
+}
+
+func pathUsers(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `users/(?P<name>.+)`,
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the user.",
+ },
+
+ "groups": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Comma-separated list of groups associated with the user.",
+ },
+
+ "policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Comma-separated list of policies associated with the user.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.DeleteOperation: b.pathUserDelete,
+ logical.ReadOperation: b.pathUserRead,
+ logical.UpdateOperation: b.pathUserWrite,
+ },
+
+ HelpSynopsis: pathUserHelpSyn,
+ HelpDescription: pathUserHelpDesc,
+ }
+}
+
+func (b *backend) User(s logical.Storage, n string) (*UserEntry, error) {
+ entry, err := s.Get("user/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result UserEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathUserDelete(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ if len(name) == 0 {
+ return logical.ErrorResponse("Error empty name"), nil
+ }
+
+ err := req.Storage.Delete("user/" + name)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathUserRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ if len(name) == 0 {
+ return logical.ErrorResponse("Error empty name"), nil
+ }
+
+ user, err := b.User(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if user == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "groups": user.Groups,
+ "policies": user.Policies,
+ },
+ }, nil
+}
+
+func (b *backend) pathUserWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ if len(name) == 0 {
+ return logical.ErrorResponse("Error empty name"), nil
+ }
+
+ groups := strings.Split(d.Get("groups").(string), ",")
+ for i, g := range groups {
+ groups[i] = strings.TrimSpace(g)
+ }
+
+ policies := strings.Split(d.Get("policies").(string), ",")
+ for i, p := range policies {
+ policies[i] = strings.TrimSpace(p)
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("user/"+name, &UserEntry{
+ Groups: groups,
+ Policies: policies,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathUserList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ users, err := req.Storage.List("user/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(users), nil
+}
+
+type UserEntry struct {
+ Groups []string
+ Policies []string
+}
+
+const pathUserHelpSyn = `
+Manage additional groups for users allowed to authenticate.
+`
+
+const pathUserHelpDesc = `
+This endpoint allows you to create, read, update, and delete configuration
+for Okta users that are allowed to authenticate, in particular associating
+additional groups to them.
+
+Deleting a user will not revoke their auth. To do this, do a revoke on "login/<username>" for
+the usernames you want revoked.
+`
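As a sketch of using these endpoints, the following writes an Okta user mapping and reads it back via the Vault API client. The mount point, token, username, and group/policy names are assumptions for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("root-token") // placeholder; needs write access to auth/okta/users/*

	// Associate additional groups and policies with the user "alice".
	if _, err := client.Logical().Write("auth/okta/users/alice", map[string]interface{}{
		"groups":   "admins,dev",
		"policies": "ops",
	}); err != nil {
		log.Fatal(err)
	}

	// pathUserRead returns the stored groups and policies.
	secret, err := client.Logical().Read("auth/okta/users/alice")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["groups"], secret.Data["policies"])
}
```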
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go
new file mode 100644
index 0000000..4bd3306
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go
@@ -0,0 +1,57 @@
+package radius
+
+import (
+ "github.com/hashicorp/vault/helper/mfa"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: backendHelp,
+
+ PathsSpecial: &logical.Paths{
+ Root: mfa.MFARootPaths(),
+
+ Unauthenticated: []string{
+ "login",
+ "login/*",
+ },
+ },
+
+ Paths: append([]*framework.Path{
+ pathConfig(&b),
+ pathUsers(&b),
+ pathUsersList(&b),
+ },
+ mfa.MFAPaths(b.Backend, pathLogin(&b))...,
+ ),
+
+ AuthRenew: b.pathLoginRenew,
+ }
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+}
+
+const backendHelp = `
+The "radius" credential provider allows authentication against
+a RADIUS server, checking username and associating users
+to set of policies.
+
+Configuration of the server is done through the "config" and "users"
+endpoints by a user with approriate access mandated by policy.
+Authentication is then done by suppying the two fields for "login".
+
+The backend optionally allows to grant a set of policies to any
+user that successfully authenticates against the RADIUS server,
+without them being explicitly mapped in vault.
+`
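The setup flow the help text describes looks roughly like this with the Vault API client. This is a minimal sketch; the mount name `radius`, the token, and the host/secret values are placeholder assumptions:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("root-token") // placeholder

	// Mount the backend at auth/radius.
	if err := client.Sys().EnableAuth("radius", "radius", "RADIUS auth"); err != nil {
		log.Fatal(err)
	}

	// Write the connection settings consumed by the "config" endpoint.
	if _, err := client.Logical().Write("auth/radius/config", map[string]interface{}{
		"host":                       "radius.example.com",
		"secret":                     "shared-secret",
		"unregistered_user_policies": "policy1,policy2",
	}); err != nil {
		log.Fatal(err)
	}
}
```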
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend_test.go
new file mode 100644
index 0000000..86ce19a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend_test.go
@@ -0,0 +1,262 @@
+package radius
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+)
+
+const (
+ testSysTTL = time.Hour * 10
+ testSysMaxTTL = time.Hour * 20
+)
+
+func TestBackend_Config(t *testing.T) {
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: testSysTTL,
+ MaxLeaseTTLVal: testSysMaxTTL,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ config_data_basic := map[string]interface{}{
+ "host": "test.radius.hostname.com",
+ "secret": "test-secret",
+ }
+
+ config_data_missingrequired := map[string]interface{}{
+ "host": "test.radius.hostname.com",
+ }
+
+ config_data_invalidport := map[string]interface{}{
+ "host": "test.radius.hostname.com",
+ "port": "notnumeric",
+ "secret": "test-secret",
+ }
+
+ config_data_invalidbool := map[string]interface{}{
+ "host": "test.radius.hostname.com",
+ "secret": "test-secret",
+ "unregistered_user_policies": "test",
+ }
+
+ config_data_emptyport := map[string]interface{}{
+ "host": "test.radius.hostname.com",
+ "port": "",
+ "secret": "test-secret",
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: false,
+ // PreCheck: func() { testAccPreCheck(t) },
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testConfigWrite(t, config_data_basic, false),
+ testConfigWrite(t, config_data_emptyport, true),
+ testConfigWrite(t, config_data_invalidport, true),
+ testConfigWrite(t, config_data_invalidbool, true),
+ testConfigWrite(t, config_data_missingrequired, true),
+ },
+ })
+}
+
+func TestBackend_users(t *testing.T) {
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: testSysTTL,
+ MaxLeaseTTLVal: testSysMaxTTL,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testStepUpdateUser(t, "web", "foo"),
+ testStepUpdateUser(t, "web2", "foo"),
+ testStepUpdateUser(t, "web3", "foo"),
+ testStepUserList(t, []string{"web", "web2", "web3"}),
+ },
+ })
+}
+
+func TestBackend_acceptance(t *testing.T) {
+
+ if os.Getenv(logicaltest.TestEnvVar) == "" {
+ t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar))
+ return
+ }
+
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: testSysTTL,
+ MaxLeaseTTLVal: testSysMaxTTL,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ configDataAcceptanceAllowUnreg := map[string]interface{}{
+ "host": os.Getenv("RADIUS_HOST"),
+ "port": os.Getenv("RADIUS_PORT"),
+ "secret": os.Getenv("RADIUS_SECRET"),
+ "unregistered_user_policies": "policy1,policy2",
+ }
+ if configDataAcceptanceAllowUnreg["port"] == "" {
+ configDataAcceptanceAllowUnreg["port"] = "1812"
+ }
+
+ configDataAcceptanceNoAllowUnreg := map[string]interface{}{
+ "host": os.Getenv("RADIUS_HOST"),
+ "port": os.Getenv("RADIUS_PORT"),
+ "secret": os.Getenv("RADIUS_SECRET"),
+ "unregistered_user_policies": "",
+ }
+ if configDataAcceptanceNoAllowUnreg["port"] == "" {
+ configDataAcceptanceNoAllowUnreg["port"] = "1812"
+ }
+
+ dataRealpassword := map[string]interface{}{
+ "password": os.Getenv("RADIUS_USERPASS"),
+ }
+
+ dataWrongpassword := map[string]interface{}{
+ "password": "wrongpassword",
+ }
+
+ username := os.Getenv("RADIUS_USERNAME")
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ PreCheck: func() { testAccPreCheck(t) },
+ AcceptanceTest: true,
+ Steps: []logicaltest.TestStep{
+ // Login with a valid but unknown user will fail because unregistered_user_policies is empty
+ testConfigWrite(t, configDataAcceptanceNoAllowUnreg, false),
+ testAccUserLogin(t, username, dataRealpassword, true),
+ // Once the user is registered, auth will succeed
+ testStepUpdateUser(t, username, ""),
+ testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default"}, false),
+
+ testStepUpdateUser(t, username, "foopolicy"),
+ testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "foopolicy"}, false),
+ testAccStepDeleteUser(t, username),
+
+ // When unregistered_user_policies is specified, an unknown user will be granted access and assigned the listed policies
+ testConfigWrite(t, configDataAcceptanceAllowUnreg, false),
+ testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "policy1", "policy2"}, false),
+
+ // Additional failure cases and policy-update checks
+ testAccUserLogin(t, "nonexistinguser", dataRealpassword, true),
+ testAccUserLogin(t, username, dataWrongpassword, true),
+ testStepUpdateUser(t, username, "foopolicy"),
+ testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "foopolicy"}, false),
+ testStepUpdateUser(t, username, "foopolicy, secondpolicy"),
+ testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "foopolicy", "secondpolicy"}, false),
+ testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "foopolicy", "secondpolicy", "thirdpolicy"}, true),
+ },
+ })
+}
+
+func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("RADIUS_HOST"); v == "" {
+ t.Fatal("RADIUS_HOST must be set for acceptance tests")
+ }
+
+ if v := os.Getenv("RADIUS_USERNAME"); v == "" {
+ t.Fatal("RADIUS_USERNAME must be set for acceptance tests")
+ }
+
+ if v := os.Getenv("RADIUS_USERPASS"); v == "" {
+ t.Fatal("RADIUS_USERPASS must be set for acceptance tests")
+ }
+
+ if v := os.Getenv("RADIUS_SECRET"); v == "" {
+ t.Fatal("RADIUS_SECRET must be set for acceptance tests")
+ }
+}
+
+func testConfigWrite(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config",
+ Data: d,
+ ErrorOk: expectError,
+ }
+}
+
+func testAccStepDeleteUser(t *testing.T, n string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "users/" + n,
+ }
+}
+
+func testStepUserList(t *testing.T, users []string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ListOperation,
+ Path: "users",
+ Check: func(resp *logical.Response) error {
+ if resp.IsError() {
+ return fmt.Errorf("Got error response: %#v", *resp)
+ }
+
+ if !reflect.DeepEqual(users, resp.Data["keys"].([]string)) {
+ return fmt.Errorf("expected:\n%#v\ngot:\n%#v\n", users, resp.Data["keys"])
+ }
+ return nil
+ },
+ }
+}
+
+func testStepUpdateUser(
+ t *testing.T, name string, policies string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "users/" + name,
+ Data: map[string]interface{}{
+ "policies": policies,
+ },
+ }
+}
+
+func testAccUserLogin(t *testing.T, user string, data map[string]interface{}, expectError bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login/" + user,
+ Data: data,
+ ErrorOk: expectError,
+ Unauthenticated: true,
+ }
+}
+
+func testAccUserLoginPolicy(t *testing.T, user string, data map[string]interface{}, policies []string, expectError bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login/" + user,
+ Data: data,
+ ErrorOk: false,
+ Unauthenticated: true,
+ Check: func(resp *logical.Response) error {
+ res := logicaltest.TestCheckAuth(policies)(resp)
+ if expectError {
+ if res == nil {
+ return fmt.Errorf("expected policy mismatch, but policies matched")
+ }
+ return nil
+ }
+ return res
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go
new file mode 100644
index 0000000..2eaac22
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go
@@ -0,0 +1,220 @@
+package radius
+
+import (
+ "strings"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfig(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config",
+ Fields: map[string]*framework.FieldSchema{
+ "host": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "RADIUS server host",
+ },
+
+ "port": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Default: 1812,
+ Description: "RADIUS server port (default: 1812)",
+ },
+ "secret": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Secret shared with the RADIUS server",
+ },
+ "unregistered_user_policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: "Comma-separated list of policies to grant upon successful RADIUS authentication of an unregisted user (default: emtpy)",
+ },
+ "dial_timeout": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 10,
+ Description: "Number of seconds before connect times out (default: 10)",
+ },
+ "read_timeout": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 10,
+ Description: "Number of seconds before response times out (default: 10)",
+ },
+ "nas_port": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Default: 10,
+ Description: "RADIUS NAS port field (default: 10)",
+ },
+ },
+
+ ExistenceCheck: b.configExistenceCheck,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathConfigRead,
+ logical.CreateOperation: b.pathConfigCreateUpdate,
+ logical.UpdateOperation: b.pathConfigCreateUpdate,
+ },
+
+ HelpSynopsis: pathConfigHelpSyn,
+ HelpDescription: pathConfigHelpDesc,
+ }
+}
+
+// configExistenceCheck determines whether the request is routed as a
+// CreateOperation or an UpdateOperation: returning true forces an
+// UpdateOperation, otherwise a CreateOperation is used.
+func (b *backend) configExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
+ entry, err := b.Config(req)
+ if err != nil {
+ return false, err
+ }
+ return entry != nil, nil
+}
+
+// Config constructs a ConfigEntry from the stored configuration.
+func (b *backend) Config(req *logical.Request) (*ConfigEntry, error) {
+
+ storedConfig, err := req.Storage.Get("config")
+ if err != nil {
+ return nil, err
+ }
+
+ if storedConfig == nil {
+ return nil, nil
+ }
+
+ var result ConfigEntry
+
+ if err := storedConfig.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathConfigRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ cfg, err := b.Config(req)
+ if err != nil {
+ return nil, err
+ }
+ if cfg == nil {
+ return nil, nil
+ }
+
+ resp := &logical.Response{
+ Data: structs.New(cfg).Map(),
+ }
+ resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the configuration information as-is, including any secrets.")
+ return resp, nil
+}
+
+func (b *backend) pathConfigCreateUpdate(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ // Build a ConfigEntry struct out of the supplied FieldData
+ cfg, err := b.Config(req)
+ if err != nil {
+ return nil, err
+ }
+ if cfg == nil {
+ cfg = &ConfigEntry{}
+ }
+
+ host, ok := d.GetOk("host")
+ if ok {
+ cfg.Host = strings.ToLower(host.(string))
+ } else if req.Operation == logical.CreateOperation {
+ cfg.Host = strings.ToLower(d.Get("host").(string))
+ }
+ if cfg.Host == "" {
+ return logical.ErrorResponse("config parameter `host` cannot be empty"), nil
+ }
+
+ port, ok := d.GetOk("port")
+ if ok {
+ cfg.Port = port.(int)
+ } else if req.Operation == logical.CreateOperation {
+ cfg.Port = d.Get("port").(int)
+ }
+
+ secret, ok := d.GetOk("secret")
+ if ok {
+ cfg.Secret = secret.(string)
+ } else if req.Operation == logical.CreateOperation {
+ cfg.Secret = d.Get("secret").(string)
+ }
+ if cfg.Secret == "" {
+ return logical.ErrorResponse("config parameter `secret` cannot be empty"), nil
+ }
+
+ policies := make([]string, 0)
+ unregisteredUserPoliciesRaw, ok := d.GetOk("unregistered_user_policies")
+ if ok {
+ unregisteredUserPoliciesStr := unregisteredUserPoliciesRaw.(string)
+ if strings.TrimSpace(unregisteredUserPoliciesStr) != "" {
+ policies = strings.Split(unregisteredUserPoliciesStr, ",")
+ for _, policy := range policies {
+ if policy == "root" {
+ return logical.ErrorResponse("root policy cannot be granted by an authentication backend"), nil
+ }
+ }
+ }
+ cfg.UnregisteredUserPolicies = policies
+ } else if req.Operation == logical.CreateOperation {
+ cfg.UnregisteredUserPolicies = policies
+ }
+
+ dialTimeout, ok := d.GetOk("dial_timeout")
+ if ok {
+ cfg.DialTimeout = dialTimeout.(int)
+ } else if req.Operation == logical.CreateOperation {
+ cfg.DialTimeout = d.Get("dial_timeout").(int)
+ }
+
+ readTimeout, ok := d.GetOk("read_timeout")
+ if ok {
+ cfg.ReadTimeout = readTimeout.(int)
+ } else if req.Operation == logical.CreateOperation {
+ cfg.ReadTimeout = d.Get("read_timeout").(int)
+ }
+
+ nasPort, ok := d.GetOk("nas_port")
+ if ok {
+ cfg.NasPort = nasPort.(int)
+ } else if req.Operation == logical.CreateOperation {
+ cfg.NasPort = d.Get("nas_port").(int)
+ }
+
+ entry, err := logical.StorageEntryJSON("config", cfg)
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+type ConfigEntry struct {
+ Host string `json:"host" structs:"host" mapstructure:"host"`
+ Port int `json:"port" structs:"port" mapstructure:"port"`
+ Secret string `json:"secret" structs:"secret" mapstructure:"secret"`
+ UnregisteredUserPolicies []string `json:"unregistered_user_policies" structs:"unregistered_user_policies" mapstructure:"unregistered_user_policies"`
+ DialTimeout int `json:"dial_timeout" structs:"dial_timeout" mapstructure:"dial_timeout"`
+ ReadTimeout int `json:"read_timeout" structs:"read_timeout" mapstructure:"read_timeout"`
+ NasPort int `json:"nas_port" structs:"nas_port" mapstructure:"nas_port"`
+}
+
+const pathConfigHelpSyn = `
+Configure the RADIUS server to connect to, along with its options.
+`
+
+const pathConfigHelpDesc = `
+This endpoint allows you to configure the RADIUS server to connect to and its
+configuration options.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go
new file mode 100644
index 0000000..6f2c16d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go
@@ -0,0 +1,165 @@
+package radius
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+
+ "layeh.com/radius"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathLogin(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "login" + framework.OptionalParamRegex("urlusername"),
+ Fields: map[string]*framework.FieldSchema{
+ "urlusername": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username to be used for login. (URL parameter)",
+ },
+
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username to be used for login. (POST request body)",
+ },
+
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Password for this user.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathLogin,
+ },
+
+ HelpSynopsis: pathLoginSyn,
+ HelpDescription: pathLoginDesc,
+ }
+}
+
+func (b *backend) pathLogin(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ username := d.Get("username").(string)
+ password := d.Get("password").(string)
+
+ if username == "" {
+ username = d.Get("urlusername").(string)
+ if username == "" {
+ return logical.ErrorResponse("username cannot be emtpy"), nil
+ }
+ }
+
+ if password == "" {
+ return logical.ErrorResponse("password cannot be emtpy"), nil
+ }
+
+ policies, resp, err := b.RadiusLogin(req, username, password)
+ // Handle an internal error
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ // Handle a logical error
+ if resp.IsError() {
+ return resp, nil
+ }
+ }
+
+ resp.Auth = &logical.Auth{
+ Policies: policies,
+ Metadata: map[string]string{
+ "username": username,
+ "policies": strings.Join(policies, ","),
+ },
+ InternalData: map[string]interface{}{
+ "password": password,
+ },
+ DisplayName: username,
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ },
+ }
+ return resp, nil
+}
+
+func (b *backend) pathLoginRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ var err error
+
+ username := req.Auth.Metadata["username"]
+ password := req.Auth.InternalData["password"].(string)
+
+ var resp *logical.Response
+ var loginPolicies []string
+
+ loginPolicies, resp, err = b.RadiusLogin(req, username, password)
+ if err != nil || (resp != nil && resp.IsError()) {
+ return resp, err
+ }
+
+ if !policyutil.EquivalentPolicies(loginPolicies, req.Auth.Policies) {
+ return nil, fmt.Errorf("policies have changed, not renewing")
+ }
+
+ return framework.LeaseExtend(0, 0, b.System())(req, d)
+}
+
+func (b *backend) RadiusLogin(req *logical.Request, username string, password string) ([]string, *logical.Response, error) {
+
+ cfg, err := b.Config(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ if cfg == nil || cfg.Host == "" || cfg.Secret == "" {
+ return nil, logical.ErrorResponse("radius backend not configured"), nil
+ }
+
+ hostport := net.JoinHostPort(cfg.Host, strconv.Itoa(cfg.Port))
+
+ packet := radius.New(radius.CodeAccessRequest, []byte(cfg.Secret))
+ packet.Add("User-Name", username)
+ packet.Add("User-Password", password)
+ packet.Add("NAS-Port", uint32(cfg.NasPort))
+
+ client := radius.Client{
+ DialTimeout: time.Duration(cfg.DialTimeout) * time.Second,
+ ReadTimeout: time.Duration(cfg.ReadTimeout) * time.Second,
+ }
+ received, err := client.Exchange(packet, hostport)
+ if err != nil {
+ return nil, logical.ErrorResponse(err.Error()), nil
+ }
+ if received.Code != radius.CodeAccessAccept {
+ return nil, logical.ErrorResponse("access denied by the authentication server"), nil
+ }
+
+ var policies []string
+ // Retrieve user entry from storage
+ user, err := b.user(req.Storage, username)
+ if err != nil {
+ return nil, nil, err
+ }
+ if user == nil {
+ // No user found, check if unregistered users are allowed (unregistered_user_policies not empty)
+ if len(policyutil.SanitizePolicies(cfg.UnregisteredUserPolicies, false)) == 0 {
+ return nil, logical.ErrorResponse("authentication succeeded but user has no associated policies"), nil
+ }
+ policies = policyutil.SanitizePolicies(cfg.UnregisteredUserPolicies, true)
+ } else {
+ policies = policyutil.SanitizePolicies(user.Policies, true)
+ }
+
+ return policies, &logical.Response{}, nil
+}
+
+const pathLoginSyn = `
+Log in with a username and password.
+`
+
+const pathLoginDesc = `
+This endpoint authenticates using a username and password. Please be sure to
+read the note on escaping from the path-help for the 'config' endpoint.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go
new file mode 100644
index 0000000..ac9a971
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go
@@ -0,0 +1,160 @@
+package radius
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathUsersList(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "users/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathUserList,
+ },
+
+ HelpSynopsis: pathUserHelpSyn,
+ HelpDescription: pathUserHelpDesc,
+ }
+}
+
+func pathUsers(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `users/(?P<name>.+)`,
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the RADIUS user.",
+ },
+
+ "policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Comma-separated list of policies associated to the user.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.DeleteOperation: b.pathUserDelete,
+ logical.ReadOperation: b.pathUserRead,
+ logical.UpdateOperation: b.pathUserWrite,
+ logical.CreateOperation: b.pathUserWrite,
+ },
+
+ ExistenceCheck: b.userExistenceCheck,
+
+ HelpSynopsis: pathUserHelpSyn,
+ HelpDescription: pathUserHelpDesc,
+ }
+}
+
+func (b *backend) userExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
+ userEntry, err := b.user(req.Storage, data.Get("name").(string))
+ if err != nil {
+ return false, err
+ }
+
+ return userEntry != nil, nil
+}
+
+func (b *backend) user(s logical.Storage, username string) (*UserEntry, error) {
+ if username == "" {
+ return nil, fmt.Errorf("missing username")
+ }
+
+ entry, err := s.Get("user/" + strings.ToLower(username))
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result UserEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathUserDelete(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("user/" + d.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathUserRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ user, err := b.user(req.Storage, d.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+ if user == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "policies": user.Policies,
+ },
+ }, nil
+}
+
+func (b *backend) pathUserWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ var policies = policyutil.ParsePolicies(d.Get("policies").(string))
+ for _, policy := range policies {
+ if policy == "root" {
+ return logical.ErrorResponse("root policy cannot be granted by an authentication backend"), nil
+ }
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("user/"+d.Get("name").(string), &UserEntry{
+ Policies: policies,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathUserList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ users, err := req.Storage.List("user/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(users), nil
+}
+
+type UserEntry struct {
+ Policies []string
+}
+
+const pathUserHelpSyn = `
+Manage users allowed to authenticate.
+`
+
+const pathUserHelpDesc = `
+This endpoint allows you to create, read, update, and delete configuration
+for RADIUS users that are allowed to authenticate, and associate policies to
+them.
+
+Deleting a user will not revoke auth for previously authenticated users.
+To do this, revoke the issued tokens by path on "auth/radius/login/<username>"
+for the usernames you want revoked.
+`
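The revocation advice in that help text maps to a revoke-prefix call against the login path. A minimal sketch, assuming the backend is mounted at `auth/radius` and the user is `alice`:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("root-token") // placeholder; needs access to sys/revoke-prefix

	// Revoke every token issued through this user's login path.
	if err := client.Sys().RevokePrefix("auth/radius/login/alice"); err != nil {
		log.Fatal(err)
	}
}
```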
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go
new file mode 100644
index 0000000..d219895
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go
@@ -0,0 +1,53 @@
+package userpass
+
+import (
+ "github.com/hashicorp/vault/helper/mfa"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: backendHelp,
+
+ PathsSpecial: &logical.Paths{
+ Root: mfa.MFARootPaths(),
+
+ Unauthenticated: []string{
+ "login/*",
+ },
+ },
+
+ Paths: append([]*framework.Path{
+ pathUsers(&b),
+ pathUsersList(&b),
+ pathUserPolicies(&b),
+ pathUserPassword(&b),
+ },
+ mfa.MFAPaths(b.Backend, pathLogin(&b))...,
+ ),
+
+ AuthRenew: b.pathLoginRenew,
+ }
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+}
+
+const backendHelp = `
+The "userpass" credential provider allows authentication using
+a combination of a username and password. No additional factors
+are supported.
+
+The username/password combination is configured using the "users/"
+endpoints by a user with root access. Authentication is then done
+by supplying the two fields for "login".
+`
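End to end, the flow the help text describes (create a user, then log in) looks like this with the API client, mirroring the steps in the tests below. Mount point, token, and credentials are placeholder assumptions:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("root-token") // placeholder

	// Create the user "web" with the "foo" policy.
	if _, err := client.Logical().Write("auth/userpass/users/web", map[string]interface{}{
		"password": "password",
		"policies": "foo",
	}); err != nil {
		log.Fatal(err)
	}

	// Log in; an UpdateOperation on login/web carries the password.
	secret, err := client.Logical().Write("auth/userpass/login/web", map[string]interface{}{
		"password": "password",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("policies:", secret.Auth.Policies) // e.g. [default foo]
}
```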
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go
new file mode 100644
index 0000000..f04dc6a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go
@@ -0,0 +1,327 @@
+package userpass
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/mitchellh/mapstructure"
+)
+
+const (
+ testSysTTL = time.Hour * 10
+ testSysMaxTTL = time.Hour * 20
+)
+
+func TestBackend_TTLDurations(t *testing.T) {
+ data1 := map[string]interface{}{
+ "password": "password",
+ "policies": "root",
+ "ttl": "21h",
+ "max_ttl": "11h",
+ }
+ data2 := map[string]interface{}{
+ "password": "password",
+ "policies": "root",
+ "ttl": "10h",
+ "max_ttl": "21h",
+ }
+ data3 := map[string]interface{}{
+ "password": "password",
+ "policies": "root",
+ "ttl": "10h",
+ "max_ttl": "10h",
+ }
+ data4 := map[string]interface{}{
+ "password": "password",
+ "policies": "root",
+ "ttl": "11h",
+ "max_ttl": "5h",
+ }
+ data5 := map[string]interface{}{
+ "password": "password",
+ }
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: testSysTTL,
+ MaxLeaseTTLVal: testSysMaxTTL,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testUsersWrite(t, "test", data1, true),
+ testUsersWrite(t, "test", data2, true),
+ testUsersWrite(t, "test", data3, false),
+ testUsersWrite(t, "test", data4, false),
+ testLoginWrite(t, "test", data5, false),
+ testLoginWrite(t, "wrong", data5, true),
+ },
+ })
+}
+
+func TestBackend_basic(t *testing.T) {
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: testSysTTL,
+ MaxLeaseTTLVal: testSysMaxTTL,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepUser(t, "web", "password", "foo"),
+ testAccStepUser(t, "web2", "password", "foo"),
+ testAccStepUser(t, "web3", "password", "foo"),
+ testAccStepList(t, []string{"web", "web2", "web3"}),
+ testAccStepLogin(t, "web", "password", []string{"default", "foo"}),
+ },
+ })
+}
+
+func TestBackend_userCrud(t *testing.T) {
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: testSysTTL,
+ MaxLeaseTTLVal: testSysMaxTTL,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepUser(t, "web", "password", "foo"),
+ testAccStepReadUser(t, "web", "default,foo"),
+ testAccStepDeleteUser(t, "web"),
+ testAccStepReadUser(t, "web", ""),
+ },
+ })
+}
+
+func TestBackend_userCreateOperation(t *testing.T) {
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: testSysTTL,
+ MaxLeaseTTLVal: testSysMaxTTL,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testUserCreateOperation(t, "web", "password", "foo"),
+ testAccStepLogin(t, "web", "password", []string{"default", "foo"}),
+ },
+ })
+}
+
+func TestBackend_passwordUpdate(t *testing.T) {
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: testSysTTL,
+ MaxLeaseTTLVal: testSysMaxTTL,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepUser(t, "web", "password", "foo"),
+ testAccStepReadUser(t, "web", "default,foo"),
+ testAccStepLogin(t, "web", "password", []string{"default", "foo"}),
+ testUpdatePassword(t, "web", "newpassword"),
+ testAccStepLogin(t, "web", "newpassword", []string{"default", "foo"}),
+ },
+ })
+
+}
+
+func TestBackend_policiesUpdate(t *testing.T) {
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: testSysTTL,
+ MaxLeaseTTLVal: testSysMaxTTL,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepUser(t, "web", "password", "foo"),
+ testAccStepReadUser(t, "web", "default,foo"),
+ testAccStepLogin(t, "web", "password", []string{"default", "foo"}),
+ testUpdatePolicies(t, "web", "foo,bar"),
+ testAccStepReadUser(t, "web", "bar,default,foo"),
+ testAccStepLogin(t, "web", "password", []string{"bar", "default", "foo"}),
+ },
+ })
+
+}
+
+func testUpdatePassword(t *testing.T, user, password string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "users/" + user + "/password",
+ Data: map[string]interface{}{
+ "password": password,
+ },
+ }
+}
+
+func testUpdatePolicies(t *testing.T, user, policies string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "users/" + user + "/policies",
+ Data: map[string]interface{}{
+ "policies": policies,
+ },
+ }
+}
+
+func testUsersWrite(t *testing.T, user string, data map[string]interface{}, expectError bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "users/" + user,
+ Data: data,
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if resp == nil && expectError {
+ return fmt.Errorf("Expected error but received nil")
+ }
+ return nil
+ },
+ }
+}
+
+func testLoginWrite(t *testing.T, user string, data map[string]interface{}, expectError bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login/" + user,
+ Data: data,
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if resp == nil && expectError {
+ return fmt.Errorf("Expected error but received nil")
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepList(t *testing.T, users []string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ListOperation,
+ Path: "users",
+ Check: func(resp *logical.Response) error {
+ if resp.IsError() {
+ return fmt.Errorf("Got error response: %#v", *resp)
+ }
+
+ if !reflect.DeepEqual(users, resp.Data["keys"].([]string)) {
+ return fmt.Errorf("expected:\n%#v\ngot:\n%#v\n", users, resp.Data["keys"])
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepLogin(t *testing.T, user string, pass string, policies []string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login/" + user,
+ Data: map[string]interface{}{
+ "password": pass,
+ },
+ Unauthenticated: true,
+
+ Check: logicaltest.TestCheckAuth(policies),
+ }
+}
+
+func testUserCreateOperation(
+ t *testing.T, name string, password string, policies string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.CreateOperation,
+ Path: "users/" + name,
+ Data: map[string]interface{}{
+ "password": password,
+ "policies": policies,
+ },
+ }
+}
+
+func testAccStepUser(
+ t *testing.T, name string, password string, policies string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "users/" + name,
+ Data: map[string]interface{}{
+ "password": password,
+ "policies": policies,
+ },
+ }
+}
+
+func testAccStepDeleteUser(t *testing.T, n string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "users/" + n,
+ }
+}
+
+func testAccStepReadUser(t *testing.T, name string, policies string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "users/" + name,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if policies == "" {
+ return nil
+ }
+
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var d struct {
+ Policies string `mapstructure:"policies"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.Policies != policies {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ return nil
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go
new file mode 100644
index 0000000..80b52e3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go
@@ -0,0 +1,85 @@
+package userpass
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ pwd "github.com/hashicorp/vault/helper/password"
+ "github.com/mitchellh/mapstructure"
+)
+
+type CLIHandler struct {
+ DefaultMount string
+}
+
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+ var data struct {
+ Username string `mapstructure:"username"`
+ Password string `mapstructure:"password"`
+ Mount string `mapstructure:"mount"`
+ Method string `mapstructure:"method"`
+ Passcode string `mapstructure:"passcode"`
+ }
+ if err := mapstructure.WeakDecode(m, &data); err != nil {
+ return "", err
+ }
+
+ if data.Username == "" {
+ return "", fmt.Errorf("'username' must be specified")
+ }
+ if data.Password == "" {
+ fmt.Printf("Password (will be hidden): ")
+ password, err := pwd.Read(os.Stdin)
+ fmt.Println()
+ if err != nil {
+ return "", err
+ }
+ data.Password = password
+ }
+ if data.Mount == "" {
+ data.Mount = h.DefaultMount
+ }
+
+ options := map[string]interface{}{
+ "password": data.Password,
+ }
+ if data.Method != "" {
+ options["method"] = data.Method
+ }
+ if data.Passcode != "" {
+ options["passcode"] = data.Passcode
+ }
+
+ path := fmt.Sprintf("auth/%s/login/%s", data.Mount, data.Username)
+ secret, err := c.Logical().Write(path, options)
+ if err != nil {
+ return "", err
+ }
+ if secret == nil {
+ return "", fmt.Errorf("empty response from credential provider")
+ }
+
+ return secret.Auth.ClientToken, nil
+}
+
+func (h *CLIHandler) Help() string {
+ help := `
+The "userpass"/"radius" credential provider allows you to authenticate with
+a username and password. To use it, specify the "username" and "password"
+parameters. If the password is not provided on the command line, it will be
+read from stdin.
+
+If multi-factor authentication (MFA) is enabled, a "method" and/or "passcode"
+may be provided depending on the MFA backend enabled. To check
+which MFA backend is in use, read "auth/[mount]/mfa_config".
+
+ Example: vault auth -method=userpass \
+ username=<user> \
+ password=<password>
+
+ `
+
+ return strings.TrimSpace(help)
+}
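`CLIHandler.Auth` can also be driven programmatically rather than through `vault auth`. A sketch under the assumption that a Vault server is reachable and the user exists at the `userpass` mount:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/builtin/credential/userpass"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	h := &userpass.CLIHandler{DefaultMount: "userpass"}

	// Omitting "password" from the map would trigger the hidden
	// stdin prompt in Auth above.
	token, err := h.Auth(client, map[string]string{
		"username": "web",
		"password": "password",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("client token:", token)
}
```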
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_login.go
new file mode 100644
index 0000000..574aa29
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_login.go
@@ -0,0 +1,109 @@
+package userpass
+
+import (
+ "crypto/subtle"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "golang.org/x/crypto/bcrypt"
+)
+
+func pathLogin(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "login/" + framework.GenericNameRegex("username"),
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username of the user.",
+ },
+
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Password for this user.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathLogin,
+ },
+
+ HelpSynopsis: pathLoginSyn,
+ HelpDescription: pathLoginDesc,
+ }
+}
+
+func (b *backend) pathLogin(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ username := strings.ToLower(d.Get("username").(string))
+
+ password := d.Get("password").(string)
+ if password == "" {
+ return nil, fmt.Errorf("missing password")
+ }
+
+ // Get the user and validate auth
+ user, err := b.user(req.Storage, username)
+ if err != nil {
+ return nil, err
+ }
+ if user == nil {
+ return logical.ErrorResponse("invalid username or password"), nil
+ }
+
+ // Check for a password match. For Vault 0.2+ compare against the bcrypt
+ // hash; handle older legacy plaintext passwords with a constant-time
+ // comparison.
+ passwordBytes := []byte(password)
+ if user.PasswordHash != nil {
+ if err := bcrypt.CompareHashAndPassword(user.PasswordHash, passwordBytes); err != nil {
+ return logical.ErrorResponse("invalid username or password"), nil
+ }
+ } else {
+ if subtle.ConstantTimeCompare([]byte(user.Password), passwordBytes) != 1 {
+ return logical.ErrorResponse("invalid username or password"), nil
+ }
+ }
+
+ return &logical.Response{
+ Auth: &logical.Auth{
+ Policies: user.Policies,
+ Metadata: map[string]string{
+ "username": username,
+ },
+ DisplayName: username,
+ LeaseOptions: logical.LeaseOptions{
+ TTL: user.TTL,
+ Renewable: true,
+ },
+ },
+ }, nil
+}
+
+func (b *backend) pathLoginRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Get the user
+ user, err := b.user(req.Storage, req.Auth.Metadata["username"])
+ if err != nil {
+ return nil, err
+ }
+ if user == nil {
+ // User no longer exists, do not renew
+ return nil, nil
+ }
+
+ if !policyutil.EquivalentPolicies(user.Policies, req.Auth.Policies) {
+ return nil, fmt.Errorf("policies have changed, not renewing")
+ }
+
+ return framework.LeaseExtend(user.TTL, user.MaxTTL, b.System())(req, d)
+}
+
+const pathLoginSyn = `
+Log in with a username and password.
+`
+
+const pathLoginDesc = `
+This endpoint authenticates using a username and password.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_password.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_password.go
new file mode 100644
index 0000000..57d9b49
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_password.go
@@ -0,0 +1,80 @@
+package userpass
+
+import (
+ "fmt"
+
+ "golang.org/x/crypto/bcrypt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathUserPassword(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "users/" + framework.GenericNameRegex("username") + "/password$",
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username for this user.",
+ },
+
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Password for this user.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathUserPasswordUpdate,
+ },
+
+ HelpSynopsis: pathUserPasswordHelpSyn,
+ HelpDescription: pathUserPasswordHelpDesc,
+ }
+}
+
+func (b *backend) pathUserPasswordUpdate(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ username := d.Get("username").(string)
+
+ userEntry, err := b.user(req.Storage, username)
+ if err != nil {
+ return nil, err
+ }
+ if userEntry == nil {
+ return nil, fmt.Errorf("username does not exist")
+ }
+
+ userErr, intErr := b.updateUserPassword(req, d, userEntry)
+ if intErr != nil {
+ return nil, intErr
+ }
+ if userErr != nil {
+ return logical.ErrorResponse(userErr.Error()), logical.ErrInvalidRequest
+ }
+
+ return nil, b.setUser(req.Storage, username, userEntry)
+}
+
+func (b *backend) updateUserPassword(req *logical.Request, d *framework.FieldData, userEntry *UserEntry) (error, error) {
+ password := d.Get("password").(string)
+ if password == "" {
+ return fmt.Errorf("missing password"), nil
+ }
+ // Generate a hash of the password
+ hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
+ if err != nil {
+ return nil, err
+ }
+ userEntry.PasswordHash = hash
+ return nil, nil
+}
+
+const pathUserPasswordHelpSyn = `
+Reset user's password.
+`
+
+const pathUserPasswordHelpDesc = `
+This endpoint allows resetting the user's password.
+`
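The whole password flow rests on two bcrypt calls: hash at `DefaultCost` on write (in `updateUserPassword` above) and a constant-time compare on login. An isolated sketch of those primitives:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// On write: derive a salted hash; this is what gets stored in
	// UserEntry.PasswordHash instead of the plaintext password.
	hash, err := bcrypt.GenerateFromPassword([]byte("newpassword"), bcrypt.DefaultCost)
	if err != nil {
		log.Fatal(err)
	}

	// On login: CompareHashAndPassword returns nil on a match and an
	// error otherwise; it never exposes the stored plaintext.
	err = bcrypt.CompareHashAndPassword(hash, []byte("newpassword"))
	fmt.Println("password matches:", err == nil)
}
```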
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go
new file mode 100644
index 0000000..6165c18
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go
@@ -0,0 +1,58 @@
+package userpass
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathUserPolicies(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "users/" + framework.GenericNameRegex("username") + "/policies$",
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username for this user.",
+ },
+ "policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Comma-separated list of policies",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathUserPoliciesUpdate,
+ },
+
+ HelpSynopsis: pathUserPoliciesHelpSyn,
+ HelpDescription: pathUserPoliciesHelpDesc,
+ }
+}
+
+func (b *backend) pathUserPoliciesUpdate(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ username := d.Get("username").(string)
+
+ userEntry, err := b.user(req.Storage, username)
+ if err != nil {
+ return nil, err
+ }
+ if userEntry == nil {
+ return nil, fmt.Errorf("username does not exist")
+ }
+
+ userEntry.Policies = policyutil.ParsePolicies(d.Get("policies").(string))
+
+ return nil, b.setUser(req.Storage, username, userEntry)
+}
+
+const pathUserPoliciesHelpSyn = `
+Update the policies associated with the username.
+`
+
+const pathUserPoliciesHelpDesc = `
+This endpoint allows updating the policies associated with the username.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go
new file mode 100644
index 0000000..f8d4eb0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go
@@ -0,0 +1,229 @@
+package userpass
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathUsersList(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "users/?",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathUserList,
+ },
+
+ HelpSynopsis: pathUserHelpSyn,
+ HelpDescription: pathUserHelpDesc,
+ }
+}
+
+func pathUsers(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "users/" + framework.GenericNameRegex("username"),
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username for this user.",
+ },
+
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Password for this user.",
+ },
+
+ "policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Comma-separated list of policies",
+ },
+ "ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: "The lease duration which decides login expiration",
+ },
+ "max_ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: "Maximum duration after which login should expire",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.DeleteOperation: b.pathUserDelete,
+ logical.ReadOperation: b.pathUserRead,
+ logical.UpdateOperation: b.pathUserWrite,
+ logical.CreateOperation: b.pathUserWrite,
+ },
+
+ ExistenceCheck: b.userExistenceCheck,
+
+ HelpSynopsis: pathUserHelpSyn,
+ HelpDescription: pathUserHelpDesc,
+ }
+}
+
+func (b *backend) userExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
+ userEntry, err := b.user(req.Storage, data.Get("username").(string))
+ if err != nil {
+ return false, err
+ }
+
+ return userEntry != nil, nil
+}
+
+func (b *backend) user(s logical.Storage, username string) (*UserEntry, error) {
+ if username == "" {
+ return nil, fmt.Errorf("missing username")
+ }
+
+ entry, err := s.Get("user/" + strings.ToLower(username))
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result UserEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) setUser(s logical.Storage, username string, userEntry *UserEntry) error {
+ entry, err := logical.StorageEntryJSON("user/"+username, userEntry)
+ if err != nil {
+ return err
+ }
+
+ return s.Put(entry)
+}
+
+func (b *backend) pathUserList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ users, err := req.Storage.List("user/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(users), nil
+}
+
+func (b *backend) pathUserDelete(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("user/" + strings.ToLower(d.Get("username").(string)))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathUserRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ user, err := b.user(req.Storage, strings.ToLower(d.Get("username").(string)))
+ if err != nil {
+ return nil, err
+ }
+ if user == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "policies": strings.Join(user.Policies, ","),
+ "ttl": user.TTL.Seconds(),
+ "max_ttl": user.MaxTTL.Seconds(),
+ },
+ }, nil
+}
+
+func (b *backend) userCreateUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ username := strings.ToLower(d.Get("username").(string))
+ userEntry, err := b.user(req.Storage, username)
+ if err != nil {
+ return nil, err
+ }
+ // Due to existence check, user will only be nil if it's a create operation
+ if userEntry == nil {
+ userEntry = &UserEntry{}
+ }
+
+ if _, ok := d.GetOk("password"); ok {
+ userErr, intErr := b.updateUserPassword(req, d, userEntry)
+ if intErr != nil {
+ return nil, intErr
+ }
+ if userErr != nil {
+ return logical.ErrorResponse(userErr.Error()), logical.ErrInvalidRequest
+ }
+ }
+
+ if policiesRaw, ok := d.GetOk("policies"); ok {
+ userEntry.Policies = policyutil.ParsePolicies(policiesRaw.(string))
+ }
+
+ ttlStr := userEntry.TTL.String()
+ if ttlStrRaw, ok := d.GetOk("ttl"); ok {
+ ttlStr = ttlStrRaw.(string)
+ }
+
+ maxTTLStr := userEntry.MaxTTL.String()
+ if maxTTLStrRaw, ok := d.GetOk("max_ttl"); ok {
+ maxTTLStr = maxTTLStrRaw.(string)
+ }
+
+ userEntry.TTL, userEntry.MaxTTL, err = b.SanitizeTTLStr(ttlStr, maxTTLStr)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("err: %s", err)), nil
+ }
+
+ return nil, b.setUser(req.Storage, username, userEntry)
+}
+
+func (b *backend) pathUserWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ password := d.Get("password").(string)
+ if req.Operation == logical.CreateOperation && password == "" {
+ return logical.ErrorResponse("missing password"), logical.ErrInvalidRequest
+ }
+ return b.userCreateUpdate(req, d)
+}
+
+type UserEntry struct {
+ // Password is deprecated in Vault 0.2 in favor of
+ // PasswordHash, but is retained for backwards compatibility.
+ Password string
+
+ // PasswordHash is a bcrypt hash of the password. This is
+ // used instead of the actual password in Vault 0.2+.
+ PasswordHash []byte
+
+ Policies []string
+
+ // Duration after which the user will be revoked unless renewed
+ TTL time.Duration
+
+ // Maximum duration for which user can be valid
+ MaxTTL time.Duration
+}
+
+const pathUserHelpSyn = `
+Manage users allowed to authenticate.
+`
+
+const pathUserHelpDesc = `
+This endpoint allows you to create, read, update, and delete users
+that are allowed to authenticate.
+
+Deleting a user will not revoke auth for prior authenticated users
+with that name. To do this, do a revoke on "login/<username>" for
+the username you want revoked. If you don't need to revoke login immediately,
+then the next renew will cause the lease to expire.
+`
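Since `userCreateUpdate` parses `ttl` and `max_ttl` from duration strings and rejects a ttl that exceeds max_ttl (see `TestBackend_TTLDurations` in backend_test.go above), a user write with explicit lease bounds looks like this. Mount point and token are placeholder assumptions:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("root-token") // placeholder

	// ttl must not exceed max_ttl; a pair like ttl=21h / max_ttl=11h
	// would be rejected with a logical error.
	if _, err := client.Logical().Write("auth/userpass/users/web", map[string]interface{}{
		"password": "password",
		"policies": "foo",
		"ttl":      "10h",
		"max_ttl":  "20h",
	}); err != nil {
		log.Fatal(err)
	}
}
```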
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go
new file mode 100644
index 0000000..246e25c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go
@@ -0,0 +1,58 @@
+package aws
+
+import (
+ "strings"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(backendHelp),
+
+ PathsSpecial: &logical.Paths{
+ LocalStorage: []string{
+ framework.WALPrefix,
+ },
+ },
+
+ Paths: []*framework.Path{
+ pathConfigRoot(),
+ pathConfigLease(&b),
+ pathRoles(),
+ pathListRoles(&b),
+ pathUser(&b),
+ pathSTS(&b),
+ },
+
+ Secrets: []*framework.Secret{
+ secretAccessKeys(&b),
+ },
+
+ WALRollback: walRollback,
+ WALRollbackMinAge: 5 * time.Minute,
+ }
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+}
+
+const backendHelp = `
+The AWS backend dynamically generates AWS access keys for a set of
+IAM policies. The AWS access keys have a configurable lease set and
+are automatically revoked at the end of the lease.
+
+After mounting this backend, credentials to generate IAM keys must
+be configured with the "root" path and policies must be written using
+the "roles/" endpoints before any access keys can be generated.
+`
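The mount-then-configure flow that help text describes is, roughly, the following. This is a sketch with placeholder credentials; the mount point `aws` and the field names (`access_key`, `secret_key`, `policy`) are assumptions about this vintage of the backend rather than values taken from this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

const rolePolicy = `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"ec2:Describe*","Resource":"*"}]}`

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("root-token") // placeholder

	// Root credentials used to mint IAM users (placeholders).
	if _, err := client.Logical().Write("aws/config/root", map[string]interface{}{
		"access_key": "AKIA...",
		"secret_key": "...",
		"region":     "us-east-1",
	}); err != nil {
		log.Fatal(err)
	}

	// A role backed by an inline IAM policy document.
	if _, err := client.Logical().Write("aws/roles/deploy", map[string]interface{}{
		"policy": rolePolicy,
	}); err != nil {
		log.Fatal(err)
	}

	// Each read mints fresh access keys tied to a revocable lease.
	secret, err := client.Logical().Read("aws/creds/deploy")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["access_key"])
}
```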
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go
new file mode 100644
index 0000000..3f04c68
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go
@@ -0,0 +1,454 @@
+package aws
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/aws/aws-sdk-go/service/iam"
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/mitchellh/mapstructure"
+)
+
+func getBackend(t *testing.T) logical.Backend {
+ be, err := Factory(logical.TestBackendConfig())
+ if err != nil {
+ t.Fatal(err)
+ }
+ return be
+}
+
+func TestBackend_basic(t *testing.T) {
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ PreCheck: func() { testAccPreCheck(t) },
+ Backend: getBackend(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t),
+ testAccStepWritePolicy(t, "test", testPolicy),
+ testAccStepReadUser(t, "test"),
+ },
+ })
+}
+
+func TestBackend_basicSTS(t *testing.T) {
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ PreCheck: func() {
+ testAccPreCheck(t)
+ createRole(t)
+ },
+ Backend: getBackend(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t),
+ testAccStepWritePolicy(t, "test", testPolicy),
+ testAccStepReadSTS(t, "test"),
+ testAccStepWriteArnPolicyRef(t, "test", testPolicyArn),
+ testAccStepReadSTSWithArnPolicy(t, "test"),
+ testAccStepWriteArnRoleRef(t, testRoleName),
+ testAccStepReadSTS(t, testRoleName),
+ },
+ Teardown: teardown,
+ })
+}
+
+func TestBackend_policyCrud(t *testing.T) {
+ var compacted bytes.Buffer
+ if err := json.Compact(&compacted, []byte(testPolicy)); err != nil {
+ t.Fatalf("bad: %s", err)
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Backend: getBackend(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t),
+ testAccStepWritePolicy(t, "test", testPolicy),
+ testAccStepReadPolicy(t, "test", compacted.String()),
+ testAccStepDeletePolicy(t, "test"),
+ testAccStepReadPolicy(t, "test", ""),
+ },
+ })
+}
+
+func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("AWS_ACCESS_KEY_ID"); v == "" {
+ t.Fatal("AWS_ACCESS_KEY_ID must be set for acceptance tests")
+ }
+
+ if v := os.Getenv("AWS_SECRET_ACCESS_KEY"); v == "" {
+ t.Fatal("AWS_SECRET_ACCESS_KEY must be set for acceptance tests")
+ }
+
+ if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" {
+ log.Println("[INFO] Test: Using us-west-2 as test region")
+ os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
+ }
+
+ if v := os.Getenv("AWS_ACCOUNT_ID"); v == "" {
+ accountId, err := getAccountId()
+ if err != nil {
+ t.Fatalf("AWS_ACCOUNT_ID could not be read from iam:GetUser for acceptance tests: %v", err)
+ }
+ log.Printf("[INFO] Test: Used %s as AWS_ACCOUNT_ID", accountId)
+ os.Setenv("AWS_ACCOUNT_ID", accountId)
+ }
+}
+
+func getAccountId() (string, error) {
+ creds := credentials.NewStaticCredentials(os.Getenv("AWS_ACCESS_KEY_ID"),
+ os.Getenv("AWS_SECRET_ACCESS_KEY"),
+ "")
+
+ awsConfig := &aws.Config{
+ Credentials: creds,
+ Region: aws.String("us-east-1"),
+ HTTPClient: cleanhttp.DefaultClient(),
+ }
+ svc := iam.New(session.New(awsConfig))
+
+ params := &iam.GetUserInput{}
+ res, err := svc.GetUser(params)
+
+ if err != nil {
+ return "", err
+ }
+
+ // split "arn:aws:iam::012345678912:user/username"
+ accountId := strings.Split(*res.User.Arn, ":")[4]
+ return accountId, nil
+}
+
+const testRoleName = "Vault-Acceptance-Test-AWS-Assume-Role"
+
+func createRole(t *testing.T) {
+ const testRoleAssumePolicy = `{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect":"Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::%s:root"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
+`
+ creds := credentials.NewStaticCredentials(os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY"), "")
+
+ awsConfig := &aws.Config{
+ Credentials: creds,
+ Region: aws.String("us-east-1"),
+ HTTPClient: cleanhttp.DefaultClient(),
+ }
+ svc := iam.New(session.New(awsConfig))
+ trustPolicy := fmt.Sprintf(testRoleAssumePolicy, os.Getenv("AWS_ACCOUNT_ID"))
+
+ params := &iam.CreateRoleInput{
+ AssumeRolePolicyDocument: aws.String(trustPolicy),
+ RoleName: aws.String(testRoleName),
+ Path: aws.String("/"),
+ }
+
+ log.Printf("[INFO] AWS CreateRole: %s", testRoleName)
+ _, err := svc.CreateRole(params)
+
+ if err != nil {
+ t.Fatalf("AWS CreateRole failed: %v", err)
+ }
+
+ attachment := &iam.AttachRolePolicyInput{
+ PolicyArn: aws.String(testPolicyArn),
+ RoleName: aws.String(testRoleName), // Required
+ }
+ _, err = svc.AttachRolePolicy(attachment)
+
+ if err != nil {
+ t.Fatalf("AWS AttachRolePolicy failed: %v", err)
+ }
+
+ // Sleep for a while because AWS is eventually consistent
+ log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+ time.Sleep(10 * time.Second)
+}
+
+func teardown() error {
+ creds := credentials.NewStaticCredentials(os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY"), "")
+
+ awsConfig := &aws.Config{
+ Credentials: creds,
+ Region: aws.String("us-east-1"),
+ HTTPClient: cleanhttp.DefaultClient(),
+ }
+ svc := iam.New(session.New(awsConfig))
+
+ attachment := &iam.DetachRolePolicyInput{
+ PolicyArn: aws.String(testPolicyArn),
+ RoleName: aws.String(testRoleName), // Required
+ }
+ _, err := svc.DetachRolePolicy(attachment)
+
+ params := &iam.DeleteRoleInput{
+ RoleName: aws.String(testRoleName),
+ }
+
+ log.Printf("[INFO] AWS DeleteRole: %s", testRoleName)
+ _, err = svc.DeleteRole(params)
+
+ if err != nil {
+ log.Printf("[WARN] AWS DeleteRole failed: %v", err)
+ }
+
+ return err
+}
+
+func testAccStepConfig(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/root",
+ Data: map[string]interface{}{
+ "access_key": os.Getenv("AWS_ACCESS_KEY_ID"),
+ "secret_key": os.Getenv("AWS_SECRET_ACCESS_KEY"),
+ "region": os.Getenv("AWS_DEFAULT_REGION"),
+ },
+ }
+}
+
+func testAccStepReadUser(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "creds/" + name,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ AccessKey string `mapstructure:"access_key"`
+ SecretKey string `mapstructure:"secret_key"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ log.Printf("[WARN] Generated credentials: %v", d)
+
+ // Sleep for a while because AWS is eventually consistent
+ log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+ time.Sleep(10 * time.Second)
+
+ // Build a client and verify that the credentials work
+ creds := credentials.NewStaticCredentials(d.AccessKey, d.SecretKey, "")
+ awsConfig := &aws.Config{
+ Credentials: creds,
+ Region: aws.String("us-east-1"),
+ HTTPClient: cleanhttp.DefaultClient(),
+ }
+ client := ec2.New(session.New(awsConfig))
+
+ log.Printf("[WARN] Verifying that the generated credentials work...")
+ _, err := client.DescribeInstances(&ec2.DescribeInstancesInput{})
+ if err != nil {
+ return err
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepReadSTS(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "sts/" + name,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ AccessKey string `mapstructure:"access_key"`
+ SecretKey string `mapstructure:"secret_key"`
+ STSToken string `mapstructure:"security_token"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ log.Printf("[WARN] Generated credentials: %v", d)
+
+ // Build a client and verify that the credentials work
+ creds := credentials.NewStaticCredentials(d.AccessKey, d.SecretKey, d.STSToken)
+ awsConfig := &aws.Config{
+ Credentials: creds,
+ Region: aws.String("us-east-1"),
+ HTTPClient: cleanhttp.DefaultClient(),
+ }
+ client := ec2.New(session.New(awsConfig))
+
+ log.Printf("[WARN] Verifying that the generated credentials work...")
+ _, err := client.DescribeInstances(&ec2.DescribeInstancesInput{})
+ if err != nil {
+ return err
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepReadSTSWithArnPolicy(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "sts/" + name,
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["error"] !=
+ "Can't generate STS credentials for a managed policy; use a role to assume or an inline policy instead" {
+ t.Fatalf("bad: %v", resp)
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepWritePolicy(t *testing.T, name string, policy string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/" + name,
+ Data: map[string]interface{}{
+ "policy": policy,
+ },
+ }
+}
+
+func testAccStepDeletePolicy(t *testing.T, n string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "roles/" + n,
+ }
+}
+
+func testAccStepReadPolicy(t *testing.T, name string, value string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "roles/" + name,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if value == "" {
+ return nil
+ }
+
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var d struct {
+ Policy string `mapstructure:"policy"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.Policy != value {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ return nil
+ },
+ }
+}
+
+const testPolicy = `
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "Stmt1426528957000",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:*"
+ ],
+ "Resource": [
+ "*"
+ ]
+ }
+ ]
+}
+`
+
+const testPolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess"
+
+func testAccStepWriteArnPolicyRef(t *testing.T, name string, arn string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/" + name,
+ Data: map[string]interface{}{
+ "arn": arn,
+ },
+ }
+}
+
+func TestBackend_basicPolicyArnRef(t *testing.T) {
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ PreCheck: func() { testAccPreCheck(t) },
+ Backend: getBackend(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t),
+ testAccStepWriteArnPolicyRef(t, "test", testPolicyArn),
+ testAccStepReadUser(t, "test"),
+ },
+ })
+}
+
+func TestBackend_policyArnCrud(t *testing.T) {
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Backend: getBackend(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t),
+ testAccStepWriteArnPolicyRef(t, "test", testPolicyArn),
+ testAccStepReadArnPolicy(t, "test", testPolicyArn),
+ testAccStepDeletePolicy(t, "test"),
+ testAccStepReadArnPolicy(t, "test", ""),
+ },
+ })
+}
+
+func testAccStepReadArnPolicy(t *testing.T, name string, value string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "roles/" + name,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if value == "" {
+ return nil
+ }
+
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var d struct {
+ Policy string `mapstructure:"arn"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.Policy != value {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepWriteArnRoleRef(t *testing.T, roleName string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/" + roleName,
+ Data: map[string]interface{}{
+ "arn": fmt.Sprintf("arn:aws:iam::%s:role/%s", os.Getenv("AWS_ACCOUNT_ID"), roleName),
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go
new file mode 100644
index 0000000..545c685
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go
@@ -0,0 +1,59 @@
+package aws
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/iam"
+ "github.com/aws/aws-sdk-go/service/sts"
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/helper/awsutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+func getRootConfig(s logical.Storage) (*aws.Config, error) {
+ credsConfig := &awsutil.CredentialsConfig{}
+
+ entry, err := s.Get("config/root")
+ if err != nil {
+ return nil, err
+ }
+ if entry != nil {
+ var config rootConfig
+ if err := entry.DecodeJSON(&config); err != nil {
+ return nil, fmt.Errorf("error reading root configuration: %s", err)
+ }
+
+ credsConfig.AccessKey = config.AccessKey
+ credsConfig.SecretKey = config.SecretKey
+ credsConfig.Region = config.Region
+ }
+
+ if credsConfig.Region == "" {
+ credsConfig.Region = "us-east-1"
+ }
+
+ credsConfig.HTTPClient = cleanhttp.DefaultClient()
+
+ creds, err := credsConfig.GenerateCredentialChain()
+ if err != nil {
+ return nil, err
+ }
+
+ return &aws.Config{
+ Credentials: creds,
+ Region: aws.String(credsConfig.Region),
+ HTTPClient: cleanhttp.DefaultClient(),
+ }, nil
+}
+
+func clientIAM(s logical.Storage) (*iam.IAM, error) {
+ awsConfig, err := getRootConfig(s)
+ if err != nil {
+ return nil, err
+ }
+ return iam.New(session.New(awsConfig)), nil
+}
+
+func clientSTS(s logical.Storage) (*sts.STS, error) {
+ awsConfig, err := getRootConfig(s)
+ if err != nil {
+ return nil, err
+ }
+ return sts.New(session.New(awsConfig)), nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_lease.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_lease.go
new file mode 100644
index 0000000..3decbfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_lease.go
@@ -0,0 +1,128 @@
+package aws
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigLease(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/lease",
+ Fields: map[string]*framework.FieldSchema{
+ "lease": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Default lease for roles.",
+ },
+
+ "lease_max": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Maximum time a credential is valid for.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathLeaseRead,
+ logical.UpdateOperation: b.pathLeaseWrite,
+ },
+
+ HelpSynopsis: pathConfigLeaseHelpSyn,
+ HelpDescription: pathConfigLeaseHelpDesc,
+ }
+}
+
+// Lease returns the lease information
+func (b *backend) Lease(s logical.Storage) (*configLease, error) {
+ entry, err := s.Get("config/lease")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result configLease
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathLeaseWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ leaseRaw := d.Get("lease").(string)
+ leaseMaxRaw := d.Get("lease_max").(string)
+
+ if len(leaseRaw) == 0 {
+ return logical.ErrorResponse("'lease' is a required parameter"), nil
+ }
+ if len(leaseMaxRaw) == 0 {
+ return logical.ErrorResponse("'lease_max' is a required parameter"), nil
+ }
+
+ lease, err := time.ParseDuration(leaseRaw)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Invalid lease: %s", err)), nil
+ }
+ leaseMax, err := time.ParseDuration(leaseMaxRaw)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Invalid lease_max: %s", err)), nil
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("config/lease", &configLease{
+ Lease: lease,
+ LeaseMax: leaseMax,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathLeaseRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ lease, err := b.Lease(req.Storage)
+
+ if err != nil {
+ return nil, err
+ }
+ if lease == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "lease": lease.Lease.String(),
+ "lease_max": lease.LeaseMax.String(),
+ },
+ }, nil
+}
+
+type configLease struct {
+ Lease time.Duration
+ LeaseMax time.Duration
+}
+
+const pathConfigLeaseHelpSyn = `
+Configure the default lease information for generated credentials.
+`
+
+const pathConfigLeaseHelpDesc = `
+This configures the default lease information used for credentials
+generated by this backend. The lease specifies the duration that a
+credential will be valid for, as well as the maximum session for
+a set of credentials.
+
+The format for the lease is a Go duration string such as "1h": an
+integer followed by a unit. The longest supported unit is the hour.
+`
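+
+// Example: a minimal sketch of the accepted duration format. pathLeaseWrite
+// above parses both fields with time.ParseDuration, so its rules apply when
+// writing "config/lease".
+//
+//   lease, _ := time.ParseDuration("1h")     // valid: integer plus unit
+//   leaseMax, _ := time.ParseDuration("24h") // "h" is the longest unit Go accepts
+//   _, err := time.ParseDuration("30d")      // invalid: "d" is not a duration unit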
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go
new file mode 100644
index 0000000..0d1d1d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go
@@ -0,0 +1,75 @@
+package aws
+
+import (
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigRoot() *framework.Path {
+ return &framework.Path{
+ Pattern: "config/root",
+ Fields: map[string]*framework.FieldSchema{
+ "access_key": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Access key with permission to create new keys.",
+ },
+
+ "secret_key": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Secret key with permission to create new keys.",
+ },
+
+ "region": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Region for API calls.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: pathConfigRootWrite,
+ },
+
+ HelpSynopsis: pathConfigRootHelpSyn,
+ HelpDescription: pathConfigRootHelpDesc,
+ }
+}
+
+func pathConfigRootWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ region := data.Get("region").(string)
+ if region == "" {
+ region = "us-east-1"
+ }
+
+ entry, err := logical.StorageEntryJSON("config/root", rootConfig{
+ AccessKey: data.Get("access_key").(string),
+ SecretKey: data.Get("secret_key").(string),
+ Region: region,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+type rootConfig struct {
+ AccessKey string `json:"access_key"`
+ SecretKey string `json:"secret_key"`
+ Region string `json:"region"`
+}
+
+const pathConfigRootHelpSyn = `
+Configure the root credentials that are used to manage IAM.
+`
+
+const pathConfigRootHelpDesc = `
+Before doing anything, the AWS backend needs credentials that are able
+to manage IAM policies, users, access keys, etc. This endpoint is used
+to configure those credentials. They don't necessarily need to be root
+keys as long as they have permission to manage IAM.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles.go
new file mode 100644
index 0000000..acd4cae
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles.go
@@ -0,0 +1,174 @@
+package aws
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "errors"
+ "strings"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathListRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList,
+ },
+
+ HelpSynopsis: pathListRolesHelpSyn,
+ HelpDescription: pathListRolesHelpDesc,
+ }
+}
+
+func pathRoles() *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the policy",
+ },
+
+ "arn": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "ARN Reference to a managed policy",
+ },
+
+ "policy": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "IAM policy document",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.DeleteOperation: pathRolesDelete,
+ logical.ReadOperation: pathRolesRead,
+ logical.UpdateOperation: pathRolesWrite,
+ },
+
+ HelpSynopsis: pathRolesHelpSyn,
+ HelpDescription: pathRolesHelpDesc,
+ }
+}
+
+func (b *backend) pathRoleList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entries, err := req.Storage.List("policy/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(entries), nil
+}
+
+func pathRolesDelete(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("policy/" + d.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func pathRolesRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entry, err := req.Storage.Get("policy/" + d.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ val := string(entry.Value)
+ if strings.HasPrefix(val, "arn:") {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "arn": val,
+ },
+ }, nil
+ }
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "policy": val,
+ },
+ }, nil
+}
+
+func useInlinePolicy(d *framework.FieldData) (bool, error) {
+ bp := d.Get("policy").(string) != ""
+ ba := d.Get("arn").(string) != ""
+
+ if !bp && !ba {
+ return false, errors.New("Either policy or arn must be provided")
+ }
+ if bp && ba {
+ return false, errors.New("Only one of policy or arn should be provided")
+ }
+ return bp, nil
+}
+
+func pathRolesWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ var buf bytes.Buffer
+
+ uip, err := useInlinePolicy(d)
+ if err != nil {
+ return nil, err
+ }
+
+ if uip {
+ if err := json.Compact(&buf, []byte(d.Get("policy").(string))); err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error compacting policy: %s", err)), nil
+ }
+ // Write the policy into storage
+ err := req.Storage.Put(&logical.StorageEntry{
+ Key: "policy/" + d.Get("name").(string),
+ Value: buf.Bytes(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ // Write the arn ref into storage
+ err := req.Storage.Put(&logical.StorageEntry{
+ Key: "policy/" + d.Get("name").(string),
+ Value: []byte(d.Get("arn").(string)),
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+const pathListRolesHelpSyn = `List the existing roles in this backend`
+
+const pathListRolesHelpDesc = `Roles will be listed by the role name.`
+
+const pathRolesHelpSyn = `
+Read, write and reference IAM policies that access keys can be made for.
+`
+
+const pathRolesHelpDesc = `
+This path allows you to read and write roles that are used to
+create access keys. These roles are associated with IAM policies that
+map directly to the route to read the access keys. For example, if the
+backend is mounted at "aws" and you create a role at "aws/roles/deploy"
+then a user could request access credentials at "aws/creds/deploy".
+
+You can either supply a user inline policy (via the policy argument), or
+provide a reference to an existing AWS policy by supplying the full arn
+reference (via the arn argument). Inline user policies written are normal
+IAM policies. Vault will not attempt to parse these except to validate
+that they're basic JSON. No validation is performed on arn references.
+
+To validate the keys, attempt to read an access key after writing the policy.
+`
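+
+// Example: a sketch of the two write forms described above; the mount path
+// "aws", the role names and policyJSON are assumptions made for the example.
+//
+//   // Inline policy: the JSON document is compacted and stored directly.
+//   client.Logical().Write("aws/roles/deploy", map[string]interface{}{
+//       "policy": policyJSON,
+//   })
+//   // Managed policy reference: the ARN string is stored as-is.
+//   client.Logical().Write("aws/roles/readonly", map[string]interface{}{
+//       "arn": "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess",
+//   })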
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go
new file mode 100644
index 0000000..08bbca9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go
@@ -0,0 +1,64 @@
+package aws
+
+import (
+ "strconv"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestBackend_PathListRoles(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+
+ b := Backend()
+ if _, err := b.Setup(config); err != nil {
+ t.Fatal(err)
+ }
+
+ roleData := map[string]interface{}{
+ "arn": "testarn",
+ }
+
+ roleReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: config.StorageView,
+ Data: roleData,
+ }
+
+ for i := 1; i <= 10; i++ {
+ roleReq.Path = "roles/testrole" + strconv.Itoa(i)
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: role creation failed. resp:%#v\n err:%v", resp, err)
+ }
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ListOperation,
+ Path: "roles",
+ Storage: config.StorageView,
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: listing roles failed. resp:%#v\n err:%v", resp, err)
+ }
+
+ if len(resp.Data["keys"].([]string)) != 10 {
+ t.Fatalf("failed to list all 10 roles")
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ListOperation,
+ Path: "roles/",
+ Storage: config.StorageView,
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: listing roles failed. resp:%#v\n err:%v", resp, err)
+ }
+
+ if len(resp.Data["keys"].([]string)) != 10 {
+ t.Fatalf("failed to list all 10 roles")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_sts.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_sts.go
new file mode 100644
index 0000000..25f5cfa
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_sts.go
@@ -0,0 +1,92 @@
+package aws
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathSTS(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "sts/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role",
+ },
+ "ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: `Lifetime of the token in seconds.
+AWS documentation excerpt: The duration, in seconds, that the credentials
+should remain valid. Acceptable durations for IAM user sessions range from 900
+seconds (15 minutes) to 129600 seconds (36 hours), with 43200 seconds (12
+hours) as the default. Sessions for AWS account owners are restricted to a
+maximum of 3600 seconds (one hour). If the duration is longer than one hour,
+the session for AWS account owners defaults to one hour.`,
+ Default: 3600,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathSTSRead,
+ logical.UpdateOperation: b.pathSTSRead,
+ },
+
+ HelpSynopsis: pathSTSHelpSyn,
+ HelpDescription: pathSTSHelpDesc,
+ }
+}
+
+func (b *backend) pathSTSRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ policyName := d.Get("name").(string)
+ ttl := int64(d.Get("ttl").(int))
+
+ // Read the policy
+ policy, err := req.Storage.Get("policy/" + policyName)
+ if err != nil {
+ return nil, fmt.Errorf("error retrieving role: %s", err)
+ }
+ if policy == nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Role '%s' not found", policyName)), nil
+ }
+ policyValue := string(policy.Value)
+ if strings.HasPrefix(policyValue, "arn:") {
+ if strings.Contains(policyValue, ":role/") {
+ return b.assumeRole(
+ req.Storage,
+ req.DisplayName, policyName, policyValue,
+ ttl,
+ )
+ } else {
+ return logical.ErrorResponse(
+ "Can't generate STS credentials for a managed policy; use a role to assume or an inline policy instead"),
+ logical.ErrInvalidRequest
+ }
+ }
+ // Use the helper to create the secret
+ return b.secretTokenCreate(
+ req.Storage,
+ req.DisplayName, policyName, policyValue,
+ ttl,
+ )
+}
+
+const pathSTSHelpSyn = `
+Generate an access key pair + security token for a specific role.
+`
+
+const pathSTSHelpDesc = `
+This path will generate a new, never before used key pair + security token for
+accessing AWS. The IAM policy used to back this key pair is the one
+referenced by the "name" parameter. For example, if this backend is mounted
+at "aws", then "aws/sts/deploy" would generate access keys for the "deploy" role.
+
+Note, these credentials are instantiated using the AWS STS backend.
+
+The access keys will have a lease associated with them. The access keys
+can be revoked by using the lease ID.
+`
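+
+// Example: a sketch of requesting STS credentials with a custom TTL. Because
+// this path registers both read and update callbacks, the TTL can be passed
+// as request data; the mount path "aws" and role "deploy" are assumptions.
+//
+//   secret, err := client.Logical().Write("aws/sts/deploy", map[string]interface{}{
+//       "ttl": 900, // seconds; see the field description above for bounds
+//   })
+//   // secret.Data contains "access_key", "secret_key" and "security_token".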
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_user.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_user.go
new file mode 100644
index 0000000..cab2728
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_user.go
@@ -0,0 +1,174 @@
+package aws
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/iam"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/mitchellh/mapstructure"
+)
+
+func pathUser(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "creds/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathUserRead,
+ },
+
+ HelpSynopsis: pathUserHelpSyn,
+ HelpDescription: pathUserHelpDesc,
+ }
+}
+
+func (b *backend) pathUserRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ policyName := d.Get("name").(string)
+
+ // Read the policy
+ policy, err := req.Storage.Get("policy/" + policyName)
+ if err != nil {
+ return nil, fmt.Errorf("error retrieving role: %s", err)
+ }
+ if policy == nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Role '%s' not found", policyName)), nil
+ }
+
+ // Use the helper to create the secret
+ return b.secretAccessKeysCreate(
+ req.Storage, req.DisplayName, policyName, string(policy.Value))
+}
+
+func pathUserRollback(req *logical.Request, _kind string, data interface{}) error {
+ var entry walUser
+ if err := mapstructure.Decode(data, &entry); err != nil {
+ return err
+ }
+ username := entry.UserName
+
+ // Get the client
+ client, err := clientIAM(req.Storage)
+ if err != nil {
+ return err
+ }
+
+ // Get information about this user
+ groupsResp, err := client.ListGroupsForUser(&iam.ListGroupsForUserInput{
+ UserName: aws.String(username),
+ MaxItems: aws.Int64(1000),
+ })
+ if err != nil {
+ return err
+ }
+ groups := groupsResp.Groups
+
+ // Inline (user) policies
+ policiesResp, err := client.ListUserPolicies(&iam.ListUserPoliciesInput{
+ UserName: aws.String(username),
+ MaxItems: aws.Int64(1000),
+ })
+ if err != nil {
+ return err
+ }
+ policies := policiesResp.PolicyNames
+
+ // Attached managed policies
+ manPoliciesResp, err := client.ListAttachedUserPolicies(&iam.ListAttachedUserPoliciesInput{
+ UserName: aws.String(username),
+ MaxItems: aws.Int64(1000),
+ })
+ if err != nil {
+ return err
+ }
+ manPolicies := manPoliciesResp.AttachedPolicies
+
+ keysResp, err := client.ListAccessKeys(&iam.ListAccessKeysInput{
+ UserName: aws.String(username),
+ MaxItems: aws.Int64(1000),
+ })
+ if err != nil {
+ return err
+ }
+ keys := keysResp.AccessKeyMetadata
+
+ // Revoke all keys
+ for _, k := range keys {
+ _, err = client.DeleteAccessKey(&iam.DeleteAccessKeyInput{
+ AccessKeyId: k.AccessKeyId,
+ UserName: aws.String(username),
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ // Detach managed policies
+ for _, p := range manPolicies {
+ _, err = client.DetachUserPolicy(&iam.DetachUserPolicyInput{
+ UserName: aws.String(username),
+ PolicyArn: p.PolicyArn,
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ // Delete any inline (user) policies
+ for _, p := range policies {
+ _, err = client.DeleteUserPolicy(&iam.DeleteUserPolicyInput{
+ UserName: aws.String(username),
+ PolicyName: p,
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ // Remove the user from all their groups
+ for _, g := range groups {
+ _, err = client.RemoveUserFromGroup(&iam.RemoveUserFromGroupInput{
+ GroupName: g.GroupName,
+ UserName: aws.String(username),
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ // Delete the user
+ _, err = client.DeleteUser(&iam.DeleteUserInput{
+ UserName: aws.String(username),
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type walUser struct {
+ UserName string
+}
+
+const pathUserHelpSyn = `
+Generate an access key pair for a specific role.
+`
+
+const pathUserHelpDesc = `
+This path will generate a new, never before used key pair for
+accessing AWS. The IAM policy used to back this key pair is the one
+referenced by the "name" parameter. For example, if this backend is mounted
+at "aws", then "aws/creds/deploy" would generate access keys for the "deploy" role.
+
+The access keys will have a lease associated with them. The access keys
+can be revoked by using the lease ID.
+`
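+
+// Example: a sketch of the read this path serves; the mount path "aws" and
+// role name "deploy" are assumptions made for the example.
+//
+//   secret, err := client.Logical().Read("aws/creds/deploy")
+//   // secret.Data contains "access_key" and "secret_key"; secret.LeaseID
+//   // can later be used to revoke the generated IAM user.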
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/rollback.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/rollback.go
new file mode 100644
index 0000000..5d1b335
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/rollback.go
@@ -0,0 +1,21 @@
+package aws
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+var walRollbackMap = map[string]framework.WALRollbackFunc{
+ "user": pathUserRollback,
+}
+
+func walRollback(req *logical.Request, kind string, data interface{}) error {
+ f, ok := walRollbackMap[kind]
+ if !ok {
+ return fmt.Errorf("unknown type to rollback: %q", kind)
+ }
+
+ return f(req, kind, data)
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go
new file mode 100644
index 0000000..637bf9d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go
@@ -0,0 +1,320 @@
+package aws
+
+import (
+ "fmt"
+ "math/rand"
+ "regexp"
+ "time"
+
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/iam"
+ "github.com/aws/aws-sdk-go/service/sts"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const SecretAccessKeyType = "access_keys"
+
+func secretAccessKeys(b *backend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretAccessKeyType,
+ Fields: map[string]*framework.FieldSchema{
+ "access_key": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Access Key",
+ },
+
+ "secret_key": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Secret Key",
+ },
+ "security_token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Security Token",
+ },
+ },
+
+ Renew: b.secretAccessKeysRenew,
+ Revoke: secretAccessKeysRevoke,
+ }
+}
+
+func genUsername(displayName, policyName, userType string) (ret string, warning string) {
+ var midString string
+
+ switch userType {
+ case "iam_user":
+ // IAM users are capped at 64 chars; this leaves, after the beginning and
+ // end added below, 42 chars to play with.
+ midString = fmt.Sprintf("%s-%s-",
+ normalizeDisplayName(displayName),
+ normalizeDisplayName(policyName))
+ if len(midString) > 42 {
+ midString = midString[0:42]
+ warning = "the calling token display name/IAM policy name were truncated to fit into IAM username length limits"
+ }
+ case "sts":
+ // Capped at 32 chars, which leaves only a couple of characters to play
+ // with, so don't insert display name or policy name at all
+ }
+
+ ret = fmt.Sprintf("vault-%s%d-%d", midString, time.Now().Unix(), rand.Int31n(10000))
+ return
+}
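+
+// For illustration (values are made up): an "iam_user" name comes out as
+// "vault-token-deploy-1494269569-3816", while an "sts" name omits the
+// display/policy segment entirely: "vault-1494269569-3816".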
+
+func (b *backend) secretTokenCreate(s logical.Storage,
+ displayName, policyName, policy string,
+ lifeTimeInSeconds int64) (*logical.Response, error) {
+ STSClient, err := clientSTS(s)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ username, usernameWarning := genUsername(displayName, policyName, "sts")
+
+ tokenResp, err := STSClient.GetFederationToken(
+ &sts.GetFederationTokenInput{
+ Name: aws.String(username),
+ Policy: aws.String(policy),
+ DurationSeconds: &lifeTimeInSeconds,
+ })
+
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error generating STS keys: %s", err)), nil
+ }
+
+ resp := b.Secret(SecretAccessKeyType).Response(map[string]interface{}{
+ "access_key": *tokenResp.Credentials.AccessKeyId,
+ "secret_key": *tokenResp.Credentials.SecretAccessKey,
+ "security_token": *tokenResp.Credentials.SessionToken,
+ }, map[string]interface{}{
+ "username": username,
+ "policy": policy,
+ "is_sts": true,
+ })
+
+ // Set the secret TTL to appropriately match the expiration of the token
+ resp.Secret.TTL = tokenResp.Credentials.Expiration.Sub(time.Now())
+
+ // STS credentials are purposefully short-lived and aren't renewable
+ resp.Secret.Renewable = false
+
+ if usernameWarning != "" {
+ resp.AddWarning(usernameWarning)
+ }
+
+ return resp, nil
+}
+
+func (b *backend) assumeRole(s logical.Storage,
+ displayName, policyName, policy string,
+ lifeTimeInSeconds int64) (*logical.Response, error) {
+ STSClient, err := clientSTS(s)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ username, usernameWarning := genUsername(displayName, policyName, "iam_user")
+
+ tokenResp, err := STSClient.AssumeRole(
+ &sts.AssumeRoleInput{
+ RoleSessionName: aws.String(username),
+ RoleArn: aws.String(policy),
+ DurationSeconds: &lifeTimeInSeconds,
+ })
+
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error assuming role: %s", err)), nil
+ }
+
+ resp := b.Secret(SecretAccessKeyType).Response(map[string]interface{}{
+ "access_key": *tokenResp.Credentials.AccessKeyId,
+ "secret_key": *tokenResp.Credentials.SecretAccessKey,
+ "security_token": *tokenResp.Credentials.SessionToken,
+ }, map[string]interface{}{
+ "username": username,
+ "policy": policy,
+ "is_sts": true,
+ })
+
+ // Set the secret TTL to appropriately match the expiration of the token
+ resp.Secret.TTL = tokenResp.Credentials.Expiration.Sub(time.Now())
+
+ // STS credentials are purposefully short-lived and aren't renewable
+ resp.Secret.Renewable = false
+
+ if usernameWarning != "" {
+ resp.AddWarning(usernameWarning)
+ }
+
+ return resp, nil
+}
+
+func (b *backend) secretAccessKeysCreate(
+ s logical.Storage,
+ displayName, policyName string, policy string) (*logical.Response, error) {
+ client, err := clientIAM(s)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ username, usernameWarning := genUsername(displayName, policyName, "iam_user")
+
+ // Write to the WAL that this user will be created. We do this before
+ // the user is created because if we switched the order, the WAL put
+ // could fail, which would put us in an awkward position: we'd have a
+ // user to roll back but no WAL entry with which to do the rollback.
+ walId, err := framework.PutWAL(s, "user", &walUser{
+ UserName: username,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("Error writing WAL entry: %s", err)
+ }
+
+ // Create the user
+ _, err = client.CreateUser(&iam.CreateUserInput{
+ UserName: aws.String(username),
+ })
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error creating IAM user: %s", err)), nil
+ }
+
+ if strings.HasPrefix(policy, "arn:") {
+ // Attach existing policy against user
+ _, err = client.AttachUserPolicy(&iam.AttachUserPolicyInput{
+ UserName: aws.String(username),
+ PolicyArn: aws.String(policy),
+ })
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error attaching user policy: %s", err)), nil
+ }
+
+ } else {
+ // Add new inline user policy against user
+ _, err = client.PutUserPolicy(&iam.PutUserPolicyInput{
+ UserName: aws.String(username),
+ PolicyName: aws.String(policyName),
+ PolicyDocument: aws.String(policy),
+ })
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error putting user policy: %s", err)), nil
+ }
+ }
+
+ // Create the keys
+ keyResp, err := client.CreateAccessKey(&iam.CreateAccessKeyInput{
+ UserName: aws.String(username),
+ })
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error creating access keys: %s", err)), nil
+ }
+
+ // Remove the WAL entry; we succeeded! If deleting the WAL entry fails,
+ // we don't return the secret, because it will get rolled back anyway,
+ // so we have to return an error here.
+ if err := framework.DeleteWAL(s, walId); err != nil {
+ return nil, fmt.Errorf("Failed to commit WAL entry: %s", err)
+ }
+
+ // Return the info!
+ resp := b.Secret(SecretAccessKeyType).Response(map[string]interface{}{
+ "access_key": *keyResp.AccessKey.AccessKeyId,
+ "secret_key": *keyResp.AccessKey.SecretAccessKey,
+ "security_token": nil,
+ }, map[string]interface{}{
+ "username": username,
+ "policy": policy,
+ "is_sts": false,
+ })
+
+ lease, err := b.Lease(s)
+ if err != nil || lease == nil {
+ lease = &configLease{}
+ }
+
+ resp.Secret.TTL = lease.Lease
+
+ if usernameWarning != "" {
+ resp.AddWarning(usernameWarning)
+ }
+
+ return resp, nil
+}
+
+func (b *backend) secretAccessKeysRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ // STS already has a lifetime, and we don't support renewing it
+ isSTSRaw, ok := req.Secret.InternalData["is_sts"]
+ if ok {
+ isSTS, ok := isSTSRaw.(bool)
+ if ok {
+ if isSTS {
+ return nil, nil
+ }
+ }
+ }
+
+ lease, err := b.Lease(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if lease == nil {
+ lease = &configLease{}
+ }
+
+ f := framework.LeaseExtend(lease.Lease, lease.LeaseMax, b.System())
+ return f(req, d)
+}
+
+func secretAccessKeysRevoke(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ // STS cleans up after itself, so we can skip this if the is_sts
+ // internal data element is set to true. If is_sts is not set, assume
+ // this is an older secret and default to the IAM approach.
+ isSTSRaw, ok := req.Secret.InternalData["is_sts"]
+ if ok {
+ isSTS, ok := isSTSRaw.(bool)
+ if ok {
+ if isSTS {
+ return nil, nil
+ }
+ } else {
+ return nil, fmt.Errorf("secret has is_sts but value could not be understood")
+ }
+ }
+
+ // Get the username from the internal data
+ usernameRaw, ok := req.Secret.InternalData["username"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing username internal data")
+ }
+ username, ok := usernameRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("secret is missing username internal data")
+ }
+
+ // Use the user rollback mechanism to delete this user
+ err := pathUserRollback(req, "user", map[string]interface{}{
+ "username": username,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func normalizeDisplayName(displayName string) string {
+ re := regexp.MustCompile("[^a-zA-Z0-9+=,.@_-]")
+ return re.ReplaceAllString(displayName, "_")
+}
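+
+// For illustration: normalizeDisplayName("token/abc") returns "token_abc";
+// every character outside [a-zA-Z0-9+=,.@_-] becomes an underscore.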
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys_test.go
new file mode 100644
index 0000000..c7a19de
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys_test.go
@@ -0,0 +1,44 @@
+package aws
+
+import (
+ "testing"
+)
+
+func TestNormalizeDisplayName_NormRequired(t *testing.T) {
+
+ invalidNames := map[string]string{
+ "^#$test name\nshould be normalized)(*": "___test_name_should_be_normalized___",
+ "^#$test name1 should be normalized)(*": "___test_name1_should_be_normalized___",
+ "^#$test name should be normalized)(*": "___test_name__should_be_normalized___",
+ "^#$test name__should be normalized)(*": "___test_name__should_be_normalized___",
+ }
+
+ for k, v := range invalidNames {
+ normalizedName := normalizeDisplayName(k)
+ if normalizedName != v {
+ t.Fatalf(
+ "normalizeDisplayName does not normalize AWS name correctly: %q normalized to %q, expected %q",
+ k,
+ normalizedName,
+ v)
+ }
+ }
+}
+
+func TestNormalizeDisplayName_NormNotRequired(t *testing.T) {
+
+ validNames := []string{
+ "test_name_should_normalize_to_itself@example.com",
+ "test1_name_should_normalize_to_itself@example.com",
+ "UPPERlower0123456789-_,.@example.com",
+ }
+
+ for _, n := range validNames {
+ normalizedName := normalizeDisplayName(n)
+ if normalizedName != n {
+ t.Fatalf(
+ "normalizeDisplayName erroneously normalizes valid names: expected %s but normalized to %s",
+ n,
+ normalizedName)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go
new file mode 100644
index 0000000..c2e769c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go
@@ -0,0 +1,124 @@
+package cassandra
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/gocql/gocql"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// Factory creates a new backend
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+// Backend contains the base information for the backend's functionality
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(backendHelp),
+
+ Paths: []*framework.Path{
+ pathConfigConnection(&b),
+ pathRoles(&b),
+ pathCredsCreate(&b),
+ },
+
+ Secrets: []*framework.Secret{
+ secretCreds(&b),
+ },
+
+ Invalidate: b.invalidate,
+
+ Clean: func() {
+ b.ResetDB(nil)
+ },
+ }
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+
+ // Session is goroutine-safe; however, since we reinitialize it when the
+ // connection info changes, we need to be able to close the old session
+ // and switch to a new one, hence the lock
+ session *gocql.Session
+ lock sync.Mutex
+}
+
+type sessionConfig struct {
+ Hosts string `json:"hosts" structs:"hosts" mapstructure:"hosts"`
+ Username string `json:"username" structs:"username" mapstructure:"username"`
+ Password string `json:"password" structs:"password" mapstructure:"password"`
+ TLS bool `json:"tls" structs:"tls" mapstructure:"tls"`
+ InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"`
+ Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"`
+ PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"`
+ IssuingCA string `json:"issuing_ca" structs:"issuing_ca" mapstructure:"issuing_ca"`
+ ProtocolVersion int `json:"protocol_version" structs:"protocol_version" mapstructure:"protocol_version"`
+ ConnectTimeout int `json:"connect_timeout" structs:"connect_timeout" mapstructure:"connect_timeout"`
+ TLSMinVersion string `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"`
+}
+
+// DB returns the database connection.
+func (b *backend) DB(s logical.Storage) (*gocql.Session, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ // If we already have a DB, we got it!
+ if b.session != nil {
+ return b.session, nil
+ }
+
+ entry, err := s.Get("config/connection")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil,
+ fmt.Errorf("Configure the DB connection with config/connection first")
+ }
+
+ config := &sessionConfig{}
+ if err := entry.DecodeJSON(config); err != nil {
+ return nil, err
+ }
+
+ session, err := createSession(config, s)
+ // Store the session in backend for reuse
+ b.session = session
+
+ return session, err
+}
+
+// ResetDB forces a connection next time DB() is called.
+func (b *backend) ResetDB(newSession *gocql.Session) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.session != nil {
+ b.session.Close()
+ }
+
+ b.session = newSession
+}
+
+func (b *backend) invalidate(key string) {
+ switch key {
+ case "config/connection":
+ b.ResetDB(nil)
+ }
+}
+
+const backendHelp = `
+The Cassandra backend dynamically generates database users.
+
+After mounting this backend, configure it using the endpoints within
+the "config/" path.
+`
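+
+// Example: a sketch of the configuration write the help text refers to; the
+// mount path "cassandra" and the connection values are assumptions made for
+// the example.
+//
+//   client.Logical().Write("cassandra/config/connection", map[string]interface{}{
+//       "hosts":            "cassandra1.example.com",
+//       "username":         "cassandra",
+//       "password":         "cassandra",
+//       "protocol_version": 3,
+//   })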
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go
new file mode 100644
index 0000000..b84ce0d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go
@@ -0,0 +1,225 @@
+package cassandra
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/mitchellh/mapstructure"
+ dockertest "gopkg.in/ory-am/dockertest.v2"
+)
+
+var (
+ testImagePull sync.Once
+)
+
+func prepareTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cid dockertest.ContainerID, retURL string) {
+ if os.Getenv("CASSANDRA_HOST") != "" {
+ return "", os.Getenv("CASSANDRA_HOST")
+ }
+
+ // Without this the checks for whether the container has started seem to
+ // never actually pass. There's really no reason to expose the test
+ // containers, so don't.
+ dockertest.BindDockerToLocalhost = "yep"
+
+ testImagePull.Do(func() {
+ dockertest.Pull("cassandra")
+ })
+
+ cwd, _ := os.Getwd()
+
+ cid, connErr := dockertest.ConnectToCassandra("latest", 60, 1000*time.Millisecond, func(connURL string) bool {
+ // This will cause a validation to run
+ resp, err := b.HandleRequest(&logical.Request{
+ Storage: s,
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Data: map[string]interface{}{
+ "hosts": connURL,
+ "username": "cassandra",
+ "password": "cassandra",
+ "protocol_version": 3,
+ },
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ // It's likely not up and running yet, so return false and try again
+ return false
+ }
+
+ retURL = connURL
+ return true
+ }, []string{"-v", cwd + "/test-fixtures/:/etc/cassandra/"}...)
+
+ if connErr != nil {
+ if cid != "" {
+ cid.KillRemove()
+ }
+ t.Fatalf("could not connect to database: %v", connErr)
+ }
+
+ return
+}
+
+func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
+ err := cid.KillRemove()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackend_basic(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, hostname := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, hostname),
+ testAccStepRole(t),
+ testAccStepReadCreds(t, "test"),
+ },
+ })
+}
+
+func TestBackend_roleCrud(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, hostname := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, hostname),
+ testAccStepRole(t),
+ testAccStepRoleWithOptions(t),
+ testAccStepReadRole(t, "test", testRole),
+ testAccStepReadRole(t, "test2", testRole),
+ testAccStepDeleteRole(t, "test"),
+ testAccStepDeleteRole(t, "test2"),
+ testAccStepReadRole(t, "test", ""),
+ testAccStepReadRole(t, "test2", ""),
+ },
+ })
+}
+
+func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("CASSANDRA_HOST"); v == "" {
+ t.Fatal("CASSANDRA_HOST must be set for acceptance tests")
+ }
+}
+
+func testAccStepConfig(t *testing.T, hostname string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Data: map[string]interface{}{
+ "hosts": hostname,
+ "username": "cassandra",
+ "password": "cassandra",
+ "protocol_version": 3,
+ },
+ }
+}
+
+func testAccStepRole(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/test",
+ Data: map[string]interface{}{
+ "creation_cql": testRole,
+ },
+ }
+}
+
+func testAccStepRoleWithOptions(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/test2",
+ Data: map[string]interface{}{
+ "creation_cql": testRole,
+ "lease": "30s",
+ "consistency": "All",
+ },
+ }
+}
+
+func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "roles/" + n,
+ }
+}
+
+func testAccStepReadCreds(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "creds/" + name,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Username string `mapstructure:"username"`
+ Password string `mapstructure:"password"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ log.Printf("[WARN] Generated credentials: %v", d)
+
+ return nil
+ },
+ }
+}
+
+func testAccStepReadRole(t *testing.T, name string, cql string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "roles/" + name,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if cql == "" {
+ return nil
+ }
+
+ return fmt.Errorf("response is nil")
+ }
+
+ var d struct {
+ CreationCQL string `mapstructure:"creation_cql"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.CreationCQL != cql {
+ return fmt.Errorf("bad: %#v\n%#v\n%#v\n", resp, cql, d.CreationCQL)
+ }
+
+ return nil
+ },
+ }
+}
+
+const testRole = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;
+GRANT ALL PERMISSIONS ON ALL KEYSPACES TO {{username}};`
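+
+// The {{username}} and {{password}} placeholders in creation_cql above are
+// substituted by the backend each time credentials are generated, so every
+// lease gets its own Cassandra user.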
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_config_connection.go
new file mode 100644
index 0000000..e00587d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_config_connection.go
@@ -0,0 +1,240 @@
+package cassandra
+
+import (
+ "fmt"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/helper/tlsutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigConnection(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/connection",
+ Fields: map[string]*framework.FieldSchema{
+ "hosts": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Comma-separated list of hosts",
+ },
+
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The username to use for connecting to the cluster",
+ },
+
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The password to use for connecting to the cluster",
+ },
+
+ "tls": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `Whether to use TLS. If pem_bundle or pem_json are
+set, this is automatically set to true`,
+ },
+
+ "insecure_tls": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `Whether to use TLS but skip verification; has no
+effect if a CA certificate is provided`,
+ },
+
+ "tls_min_version": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "tls12",
+ Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
+ },
+
+ "pem_bundle": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `PEM-format, concatenated unencrypted secret key
+and certificate, with optional CA certificate`,
+ },
+
+ "pem_json": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `JSON containing a PEM-format, unencrypted secret
+key and certificate, with optional CA certificate.
+The JSON output of a certificate issued with the PKI
+backend can be directly passed into this parameter.
+If both this and "pem_bundle" are specified, this will
+take precedence.`,
+ },
+
+ "protocol_version": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `The protocol version to use. Defaults to 2.`,
+ },
+
+ "connect_timeout": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 5,
+ Description: `The connection timeout to use. Defaults to 5.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathConnectionRead,
+ logical.UpdateOperation: b.pathConnectionWrite,
+ },
+
+ HelpSynopsis: pathConfigConnectionHelpSyn,
+ HelpDescription: pathConfigConnectionHelpDesc,
+ }
+}
+
+func (b *backend) pathConnectionRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ entry, err := req.Storage.Get("config/connection")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return logical.ErrorResponse("Configure the DB connection with config/connection first"), nil
+ }
+
+ config := &sessionConfig{}
+ if err := entry.DecodeJSON(config); err != nil {
+ return nil, err
+ }
+
+ config.Password = "**********"
+ if len(config.PrivateKey) > 0 {
+ config.PrivateKey = "**********"
+ }
+
+ return &logical.Response{
+ Data: structs.New(config).Map(),
+ }, nil
+}
+
+func (b *backend) pathConnectionWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ hosts := data.Get("hosts").(string)
+ username := data.Get("username").(string)
+ password := data.Get("password").(string)
+
+ switch {
+ case len(hosts) == 0:
+ return logical.ErrorResponse("Hosts cannot be empty"), nil
+ case len(username) == 0:
+ return logical.ErrorResponse("Username cannot be empty"), nil
+ case len(password) == 0:
+ return logical.ErrorResponse("Password cannot be empty"), nil
+ }
+
+ config := &sessionConfig{
+ Hosts: hosts,
+ Username: username,
+ Password: password,
+ TLS: data.Get("tls").(bool),
+ InsecureTLS: data.Get("insecure_tls").(bool),
+ ProtocolVersion: data.Get("protocol_version").(int),
+ ConnectTimeout: data.Get("connect_timeout").(int),
+ }
+
+ config.TLSMinVersion = data.Get("tls_min_version").(string)
+ if config.TLSMinVersion == "" {
+ return logical.ErrorResponse("failed to get 'tls_min_version' value"), nil
+ }
+
+ var ok bool
+ _, ok = tlsutil.TLSLookup[config.TLSMinVersion]
+ if !ok {
+ return logical.ErrorResponse("invalid 'tls_min_version'"), nil
+ }
+
+ if config.InsecureTLS {
+ config.TLS = true
+ }
+
+ pemBundle := data.Get("pem_bundle").(string)
+ pemJSON := data.Get("pem_json").(string)
+
+ var certBundle *certutil.CertBundle
+ var parsedCertBundle *certutil.ParsedCertBundle
+ var err error
+
+ switch {
+ case len(pemJSON) != 0:
+ parsedCertBundle, err = certutil.ParsePKIJSON([]byte(pemJSON))
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Could not parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: %s", err)), nil
+ }
+ certBundle, err = parsedCertBundle.ToCertBundle()
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Error marshaling PEM information: %s", err)), nil
+ }
+ config.Certificate = certBundle.Certificate
+ config.PrivateKey = certBundle.PrivateKey
+ config.IssuingCA = certBundle.IssuingCA
+ config.TLS = true
+
+ case len(pemBundle) != 0:
+ parsedCertBundle, err = certutil.ParsePEMBundle(pemBundle)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Error parsing the given PEM information: %s", err)), nil
+ }
+ certBundle, err = parsedCertBundle.ToCertBundle()
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Error marshaling PEM information: %s", err)), nil
+ }
+ config.Certificate = certBundle.Certificate
+ config.PrivateKey = certBundle.PrivateKey
+ config.IssuingCA = certBundle.IssuingCA
+ config.TLS = true
+ }
+
+ session, err := createSession(config, req.Storage)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("config/connection", config)
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ // Reset the DB connection
+ b.ResetDB(session)
+
+ return nil, nil
+}
+
+const pathConfigConnectionHelpSyn = `
+Configure the connection information to talk to Cassandra.
+`
+
+const pathConfigConnectionHelpDesc = `
+This path configures the connection information used to connect to Cassandra.
+
+"hosts" is a comma-deliniated list of hostnames in the Cassandra cluster.
+
+"username" and "password" are self-explanatory, although the given user
+must have superuser access within Cassandra. Note that since this backend
+issues username/password credentials, Cassandra must be configured to use
+PasswordAuthenticator or a similar backend for its authentication. If you wish
+to have no authorization in Cassandra and want to use TLS client certificates,
+see the PKI backend.
+
+TLS works as follows:
+
+* If "tls" is set to true, the connection will use TLS; this happens automatically if "pem_bundle", "pem_json", or "insecure_tls" is set
+
+* If "insecure_tls" is set to true, the connection will not perform verification of the server certificate; this also sets "tls" to true
+
+* If only "issuing_ca" is set in "pem_json", or the only certificate in "pem_bundle" is a CA certificate, the given CA certificate will be used for server certificate verification; otherwise the system CA certificates will be used
+
+* If "certificate" and "private_key" are set in "pem_bundle" or "pem_json", client auth will be turned on for the connection
+
+"pem_bundle" should be a PEM-concatenated bundle of a private key + client certificate, an issuing CA certificate, or both. "pem_json" should contain the same information; for convenience, the JSON format is the same as that output by the issue command from the PKI backend.
+
+When configuring the connection information, the backend will verify its
+validity.
+`
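+
+// Illustrative sketch, not part of the backend: configuring this endpoint
+// through the Vault API client (github.com/hashicorp/vault/api). The
+// "cassandra/" mount point and the credential values below are assumptions;
+// adjust them to your deployment.
+//
+//  import vaultapi "github.com/hashicorp/vault/api"
+//
+//  func configureConnection(client *vaultapi.Client) error {
+//      _, err := client.Logical().Write("cassandra/config/connection", map[string]interface{}{
+//          "hosts":    "127.0.0.1",
+//          "username": "cassandra",
+//          "password": "cassandra",
+//      })
+//      return err
+//  }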
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go
new file mode 100644
index 0000000..4b025ba
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go
@@ -0,0 +1,123 @@
+package cassandra
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/gocql/gocql"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathCredsCreate(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "creds/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathCredsCreateRead,
+ },
+
+ HelpSynopsis: pathCredsCreateReadHelpSyn,
+ HelpDescription: pathCredsCreateReadHelpDesc,
+ }
+}
+
+func (b *backend) pathCredsCreateRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+
+ // Get the role
+ role, err := getRole(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", name)), nil
+ }
+
+ displayName := req.DisplayName
+ userUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
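+ // Build a unique username of the form
+ // "vault_<role>_<displayName>_<uuid>_<unixtime>"; dashes are replaced with
+ // underscores below so the name remains a valid CQL identifier.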
+ username := fmt.Sprintf("vault_%s_%s_%s_%d", name, displayName, userUUID, time.Now().Unix())
+ username = strings.Replace(username, "-", "_", -1)
+ password, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get our connection
+ session, err := b.DB(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set consistency
+ if role.Consistency != "" {
+ consistencyValue, err := gocql.ParseConsistencyWrapper(role.Consistency)
+ if err != nil {
+ return nil, err
+ }
+
+ session.SetConsistency(consistencyValue)
+ }
+
+ // Execute each query
+ for _, query := range strutil.ParseArbitraryStringSlice(role.CreationCQL, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ err = session.Query(substQuery(query, map[string]string{
+ "username": username,
+ "password": password,
+ })).Exec()
+ if err != nil {
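+ // Creation failed: make a best-effort attempt to roll back any
+ // partially created user with the role's rollback CQL. Rollback
+ // errors are intentionally ignored; the original error is returned.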
+ for _, query := range strutil.ParseArbitraryStringSlice(role.RollbackCQL, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ session.Query(substQuery(query, map[string]string{
+ "username": username,
+ "password": password,
+ })).Exec()
+ }
+ return nil, err
+ }
+ }
+
+ // Return the secret
+ resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
+ "username": username,
+ "password": password,
+ }, map[string]interface{}{
+ "username": username,
+ "role": name,
+ })
+ resp.Secret.TTL = role.Lease
+
+ return resp, nil
+}
+
+const pathCredsCreateReadHelpSyn = `
+Request database credentials for a certain role.
+`
+
+const pathCredsCreateReadHelpDesc = `
+This path creates database credentials for a certain role. The
+database credentials will be generated on demand and will be automatically
+revoked when the lease is up.
+`
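+
+// Illustrative sketch: reading this endpoint with the Vault API client
+// returns the generated credentials. The "cassandra/" mount point and the
+// role name "readonly" are assumptions.
+//
+//  secret, err := client.Logical().Read("cassandra/creds/readonly")
+//  if err != nil {
+//      return err
+//  }
+//  username := secret.Data["username"].(string)
+//  password := secret.Data["password"].(string)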
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_roles.go
new file mode 100644
index 0000000..9d4f48b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_roles.go
@@ -0,0 +1,198 @@
+package cassandra
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/fatih/structs"
+ "github.com/gocql/gocql"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const (
+ defaultCreationCQL = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;`
+ defaultRollbackCQL = `DROP USER '{{username}}';`
+)
+
+func pathRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role",
+ },
+
+ "creation_cql": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: defaultCreationCQL,
+ Description: `CQL to create a user and optionally grant
+authorization. If not supplied, a default that
+creates non-superuser accounts with the built-in
+password authenticator will be used; no
+authorization grants will be configured. Separate
+statements by semicolons; use @file to load from a
+file. Valid template values are '{{username}}' and
+'{{password}}' -- the single quotes are important!`,
+ },
+
+ "rollback_cql": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: defaultRollbackCQL,
+ Description: `CQL to roll back an account operation. This will
+be used if there is an error during execution of a
+statement passed in via the "creation_cql" parameter.
+The default simply drops the user, which
+should generally be sufficient. Separate statements
+by semicolons; use @file to load from a file. Valid
+template values are '{{username}}' and
+'{{password}}' -- the single quotes are important!`,
+ },
+
+ "lease": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "4h",
+ Description: "The lease length; defaults to 4 hours",
+ },
+
+ "consistency": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "Quorum",
+ Description: "The consistency level for the operations; defaults to Quorum.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRoleRead,
+ logical.UpdateOperation: b.pathRoleCreate,
+ logical.DeleteOperation: b.pathRoleDelete,
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func getRole(s logical.Storage, n string) (*roleEntry, error) {
+ entry, err := s.Get("role/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result roleEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathRoleDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("role/" + data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathRoleRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ role, err := getRole(req.Storage, data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: structs.New(role).Map(),
+ }, nil
+}
+
+func (b *backend) pathRoleCreate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+
+ creationCQL := data.Get("creation_cql").(string)
+
+ rollbackCQL := data.Get("rollback_cql").(string)
+
+ leaseRaw := data.Get("lease").(string)
+ lease, err := time.ParseDuration(leaseRaw)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error parsing lease value of %s: %s", leaseRaw, err)), nil
+ }
+
+ consistencyStr := data.Get("consistency").(string)
+ _, err = gocql.ParseConsistencyWrapper(consistencyStr)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error parsing consistency value of %q: %v", consistencyStr, err)), nil
+ }
+
+ entry := &roleEntry{
+ Lease: lease,
+ CreationCQL: creationCQL,
+ RollbackCQL: rollbackCQL,
+ Consistency: consistencyStr,
+ }
+
+ // Store it
+ entryJSON, err := logical.StorageEntryJSON("role/"+name, entry)
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entryJSON); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+type roleEntry struct {
+ CreationCQL string `json:"creation_cql" structs:"creation_cql"`
+ Lease time.Duration `json:"lease" structs:"lease"`
+ RollbackCQL string `json:"rollback_cql" structs:"rollback_cql"`
+ Consistency string `json:"consistency" structs:"consistency"`
+}
+
+const pathRoleHelpSyn = `
+Manage the roles that can be created with this backend.
+`
+
+const pathRoleHelpDesc = `
+This path lets you manage the roles that can be created with this backend.
+
+The "creation_cql" parameter customizes the CQL string used to create users
+and assign them grants. This can be a sequence of CQL queries separated by
+semicolons. Some substitution will be done to the CQL string for certain keys.
+The names of the variables must be surrounded by '{{' and '}}' to be replaced.
+Note that it is important that single quotes are used, not double quotes.
+
+ * "username" - The random username generated for the DB user.
+
+ * "password" - The random password generated for the DB user.
+
+If no "creation_cql" parameter is given, a default will be used:
+
+` + defaultCreationCQL + `
+
+This default should be suitable for Cassandra installations using the password
+authenticator but not configured to use authorization.
+
+Similarly, the "rollback_cql" is used if user creation fails, in the absense of
+Cassandra transactions. The default should be suitable for almost any
+instance of Cassandra:
+
+` + defaultRollbackCQL + `
+
+"lease" the lease time; if not set the mount/system defaults are used.
+`
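+
+// Illustrative sketch: defining a role through this endpoint with the Vault
+// API client. The "cassandra/" mount point and the role name are
+// assumptions; the CQL uses the {{username}}/{{password}} templates
+// described above.
+//
+//  _, err := client.Logical().Write("cassandra/roles/readonly", map[string]interface{}{
+//      "creation_cql": "CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER; " +
+//          "GRANT SELECT ON ALL KEYSPACES TO {{username}};",
+//      "lease":       "1h",
+//      "consistency": "Quorum",
+//  })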
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/secret_creds.go
new file mode 100644
index 0000000..f894ad2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/secret_creds.go
@@ -0,0 +1,76 @@
+package cassandra
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// SecretCredsType is the type of creds issued from this backend
+const SecretCredsType = "cassandra"
+
+func secretCreds(b *backend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretCredsType,
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username",
+ },
+
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Password",
+ },
+ },
+
+ Renew: b.secretCredsRenew,
+ Revoke: b.secretCredsRevoke,
+ }
+}
+
+func (b *backend) secretCredsRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Get the lease information
+ roleRaw, ok := req.Secret.InternalData["role"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing role internal data")
+ }
+ roleName, ok := roleRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("error converting role internal data to string")
+ }
+
+ role, err := getRole(req.Storage, roleName)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load role: %s", err)
+ }
+ if role == nil {
+ // Guard against a nil dereference if the role was deleted after issuance
+ return nil, fmt.Errorf("role %s not found", roleName)
+ }
+
+ return framework.LeaseExtend(role.Lease, 0, b.System())(req, d)
+}
+
+func (b *backend) secretCredsRevoke(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Get the username from the internal data
+ usernameRaw, ok := req.Secret.InternalData["username"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing username internal data")
+ }
+ username, ok := usernameRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("error converting username internal data to string")
+ }
+
+ session, err := b.DB(req.Storage)
+ if err != nil {
+ return nil, fmt.Errorf("error getting session")
+ }
+
+ err = session.Query(fmt.Sprintf("DROP USER '%s'", username)).Exec()
+ if err != nil {
+ return nil, fmt.Errorf("error removing user %s", username)
+ }
+
+ return nil, nil
+}
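+
+// Illustrative sketch: leases for this secret type can be renewed or revoked
+// through the standard lease endpoints of the API client; the 3600-second
+// increment is an arbitrary example value.
+//
+//  // extend the lease by one hour
+//  _, err := client.Sys().Renew(secret.LeaseID, 3600)
+//
+//  // or revoke it early
+//  err = client.Sys().Revoke(secret.LeaseID)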
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/test-fixtures/cassandra.yaml b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/test-fixtures/cassandra.yaml
new file mode 100644
index 0000000..5b12c8c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/test-fixtures/cassandra.yaml
@@ -0,0 +1,1146 @@
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: 'Test Cluster'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting on the node's initial start;
+# on subsequent starts, this setting will apply even if initial_token is set.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+num_tokens: 256
+
+# Triggers automatic allocation of num_tokens tokens for this node. The allocation
+# algorithm attempts to choose tokens in a way that optimizes replicated load over
+# the nodes in the datacenter for the replication strategy used by the specified
+# keyspace.
+#
+# The load assigned to each node will be close to proportional to its number of
+# vnodes.
+#
+# Only supported with the Murmur3Partitioner.
+# allocate_tokens_for_keyspace: KEYSPACE
+
+# initial_token allows you to specify tokens manually. While you can use it with
+# vnodes (num_tokens > 1, above) -- in which case you should provide a
+# comma-separated list -- it's primarily used when adding nodes to legacy clusters
+# that do not have vnodes enabled.
+# initial_token:
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+# May either be "true" or "false" to enable globally
+hinted_handoff_enabled: true
+
+# When hinted_handoff_enabled is true, a black list of data centers that will not
+# perform hinted handoff
+# hinted_handoff_disabled_datacenters:
+# - DC1
+# - DC2
+
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+max_hint_window_in_ms: 10800000 # 3 hours
+
+# Maximum throttle in KBs per second, per delivery thread. This will be
+# reduced proportionally to the number of nodes in the cluster. (If there
+# are two nodes in the cluster, each delivery thread will use the maximum
+# rate; if there are three, each will throttle to half of the maximum,
+# since we expect two nodes to be delivering hints simultaneously.)
+hinted_handoff_throttle_in_kb: 1024
+
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
+
+# Directory where Cassandra should store hints.
+# If not set, the default directory is $CASSANDRA_HOME/data/hints.
+# hints_directory: /var/lib/cassandra/hints
+
+# How often hints should be flushed from the internal buffers to disk.
+# Will *not* trigger fsync.
+hints_flush_period_in_ms: 10000
+
+# Maximum size for a single hints file, in megabytes.
+max_hints_file_size_in_mb: 128
+
+# Compression to apply to the hint files. If omitted, hints files
+# will be written uncompressed. LZ4, Snappy, and Deflate compressors
+# are supported.
+#hints_compression:
+# - class_name: LZ4Compressor
+# parameters:
+# -
+
+# Maximum throttle in KBs per second, total. This will be
+# reduced proportionally to the number of nodes in the cluster.
+batchlog_replay_throttle_in_kb: 1024
+
+# Authentication backend, implementing IAuthenticator; used to identify users
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+# PasswordAuthenticator}.
+#
+# - AllowAllAuthenticator performs no checks - set it to disable authentication.
+# - PasswordAuthenticator relies on username/password pairs to authenticate
+# users. It keeps usernames and hashed passwords in system_auth.credentials table.
+# Please increase system_auth keyspace replication factor if you use this authenticator.
+# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
+authenticator: PasswordAuthenticator
+
+# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+# CassandraAuthorizer}.
+#
+# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+# increase system_auth keyspace replication factor if you use this authorizer.
+authorizer: CassandraAuthorizer
+
+# Part of the Authentication & Authorization backend, implementing IRoleManager; used
+# to maintain grants and memberships between roles.
+# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager,
+# which stores role information in the system_auth keyspace. Most functions of the
+# IRoleManager require an authenticated login, so unless the configured IAuthenticator
+# actually implements authentication, most of this functionality will be unavailable.
+#
+# - CassandraRoleManager stores role data in the system_auth keyspace. Please
+# increase system_auth keyspace replication factor if you use this role manager.
+role_manager: CassandraRoleManager
+
+# Validity period for roles cache (fetching granted roles can be an expensive
+# operation depending on the role manager, CassandraRoleManager is one example)
+# Granted roles are cached for authenticated sessions in AuthenticatedUser and
+# after the period specified here, become eligible for (async) reload.
+# Defaults to 2000, set to 0 to disable caching entirely.
+# Will be disabled automatically for AllowAllAuthenticator.
+roles_validity_in_ms: 2000
+
+# Refresh interval for roles cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If roles_validity_in_ms is non-zero, then this must be
+# also.
+# Defaults to the same value as roles_validity_in_ms.
+# roles_update_interval_in_ms: 2000
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 2000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+permissions_validity_in_ms: 2000
+
+# Refresh interval for permissions cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If permissions_validity_in_ms is non-zero, then this must be
+# also.
+# Defaults to the same value as permissions_validity_in_ms.
+# permissions_update_interval_in_ms: 2000
+
+# Validity period for credentials cache. This cache is tightly coupled to
+# the provided PasswordAuthenticator implementation of IAuthenticator. If
+# another IAuthenticator implementation is configured, this cache will not
+# be automatically used and so the following settings will have no effect.
+# Please note, credentials are cached in their encrypted form, so while
+# activating this cache may reduce the number of queries made to the
+# underlying table, it may not bring a significant reduction in the
+# latency of individual authentication attempts.
+# Defaults to 2000, set to 0 to disable credentials caching.
+credentials_validity_in_ms: 2000
+
+# Refresh interval for credentials cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If credentials_validity_in_ms is non-zero, then this must be
+# also.
+# Defaults to the same value as credentials_validity_in_ms.
+# credentials_update_interval_in_ms: 2000
+
+# The partitioner is responsible for distributing groups of rows (by
+# partition key) across nodes in the cluster. You should leave this
+# alone for new clusters. The partitioner can NOT be changed without
+# reloading all data, so when upgrading you should set this to the
+# same partitioner you were already using.
+#
+# Besides Murmur3Partitioner, partitioners included for backwards
+# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
+# OrderPreservingPartitioner.
+#
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# Directories where Cassandra should store data on disk. Cassandra
+# will spread data evenly across them, subject to the granularity of
+# the configured compaction strategy.
+# If not set, the default directory is $CASSANDRA_HOME/data/data.
+data_file_directories:
+ - /var/lib/cassandra/data
+
+# Commit log. When running on magnetic HDD, this should be a
+# separate spindle from the data directories.
+# If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
+commitlog_directory: /var/lib/cassandra/commitlog
+
+# Enable / disable CDC functionality on a per-node basis. This modifies the logic used
+# for write path allocation rejection (standard: never reject. cdc: reject Mutation
+# containing a CDC-enabled table if at space limit in cdc_raw_directory).
+cdc_enabled: false
+
+# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the
+# segment contains mutations for a CDC-enabled table. This should be placed on a
+# separate spindle than the data directories. If not set, the default directory is
+# $CASSANDRA_HOME/data/cdc_raw.
+# cdc_raw_directory: /var/lib/cassandra/cdc_raw
+
+# Policy for data disk failures:
+#
+# die
+# shut down gossip and client transports and kill the JVM for any fs errors or
+# single-sstable errors, so the node can be replaced.
+#
+# stop_paranoid
+# shut down gossip and client transports even for single-sstable errors,
+# kill the JVM for errors during startup.
+#
+# stop
+# shut down gossip and client transports, leaving the node effectively dead, but
+# can still be inspected via JMX, kill the JVM for errors during startup.
+#
+# best_effort
+# stop using the failed disk and respond to requests based on
+# remaining available sstables. This means you WILL see obsolete
+# data at CL.ONE!
+#
+# ignore
+# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# Policy for commit disk failures:
+#
+# die
+# shut down gossip and Thrift and kill the JVM, so the node can be replaced.
+#
+# stop
+# shut down gossip and Thrift, leaving the node effectively dead, but
+# can still be inspected via JMX.
+#
+# stop_commit
+# shutdown the commit log, letting writes collect but
+# continuing to service reads, as in pre-2.0.5 Cassandra
+#
+# ignore
+# ignore fatal errors and let the batches fail
+commit_failure_policy: stop
+
+# Maximum size of the native protocol prepared statement cache
+#
+# Valid values are either "auto" (omitting the value) or a value greater than 0.
+#
+# Note that specifying too large a value will result in long-running GCs and possibly
+# out-of-memory errors. Keep the value at a small fraction of the heap.
+#
+# If you constantly see "prepared statements discarded in the last minute because
+# cache limit reached" messages, the first step is to investigate the root cause
+# of these messages and check whether prepared statements are used correctly -
+# i.e. use bind markers for variable parts.
+#
+# Only change the default value if you really have more prepared statements than
+# fit in the cache. In most cases it is not necessary to change this value.
+# Constantly re-preparing statements is a performance penalty.
+#
+# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
+prepared_statements_cache_size_mb:
+
+# Maximum size of the Thrift prepared statement cache
+#
+# If you do not use Thrift at all, it is safe to leave this value at "auto".
+#
+# See description of 'prepared_statements_cache_size_mb' above for more information.
+#
+# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
+thrift_prepared_statements_cache_size_mb:
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must contain the entire row,
+# so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
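+
+# Worked example of the "auto" default (illustrative): with an 8192 MB heap,
+# min(5% of 8192 MB, 100 MB) = min(409.6 MB, 100 MB) = 100 MB.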
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and saving is relatively
+# cheap in terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Row cache implementation class name. Available implementations:
+#
+# org.apache.cassandra.cache.OHCProvider
+# Fully off-heap row cache implementation (default).
+#
+# org.apache.cassandra.cache.SerializingCacheProvider
+# This is the row cache implementation available
+# in previous releases of Cassandra.
+# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+
+# Maximum size of the row cache in memory.
+# Please note that OHC cache implementation requires some additional off-heap memory to manage
+# the map structures and some in-flight memory during operations before/after cache entries can be
+# accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
+# Do not specify more memory than the system can afford in the worst usual situation, and leave some
+# headroom for the OS block level cache. Never allow your system to swap.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should save the row cache.
+# Caches are saved to saved_caches_directory as specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and saving is relatively
+# cheap in terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save.
+# Specify 0 (which is the default), meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# Maximum size of the counter cache in memory.
+#
+# Counter cache helps to reduce counter locks' contention for hot counter cells.
+# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
+# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
+# of the lock hold, helping with hot counter cell updates, but will not allow skipping
+# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
+# in memory, not the whole counter, so it's relatively cheap.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
+# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
+counter_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the counter cache (keys only). Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Default is 7200 or 2 hours.
+counter_cache_save_period: 7200
+
+# Number of keys from the counter cache to save
+# Disabled by default, meaning all keys are going to be saved
+# counter_cache_keys_to_save: 100
+
+# saved caches
+# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
+saved_caches_directory: /var/lib/cassandra/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+# Max mutation size is also configurable via max_mutation_size_in_kb setting in
+# cassandra.yaml. The default is half the size of commitlog_segment_size_in_mb * 1024.
+#
+# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
+# be set to at least twice the size of max_mutation_size_in_kb / 1024
+#
+commitlog_segment_size_in_mb: 32
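+
+# Worked example of the rule above (illustrative): with max_mutation_size_in_kb
+# set to 16384, commitlog_segment_size_in_mb must be at least
+# 2 * (16384 / 1024) = 32.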
+
+# Compression to apply to the commit log. If omitted, the commit log
+# will be written uncompressed. LZ4, Snappy, and Deflate compressors
+# are supported.
+# commitlog_compression:
+# - class_name: LZ4Compressor
+# parameters:
+# -
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+# Ex: "<ip1>,<ip2>,<ip3>"
+ - seeds: "172.17.0.3"
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them. Same applies to
+# "concurrent_counter_writes", since counter writes read the current
+# values before incrementing and writing them back.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+concurrent_counter_writes: 32
+
+# Materialized view writes involve a read, so this should be limited by the
+# lesser of concurrent_reads and concurrent_writes.
+concurrent_materialized_view_writes: 32
+
+# Maximum memory to use for sstable chunk cache and buffer pooling.
+# 32MB of this is reserved for pooling buffers; the rest is used as a
+# cache that holds uncompressed sstable chunks.
+# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap,
+# so is in addition to the memory allocated for heap. The cache also has on-heap
+# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size
+# if the default 64k chunk size is used).
+# Memory is only allocated when needed.
+# file_cache_size_in_mb: 512
+
+# Flag indicating whether to allocate on or off heap when the sstable buffer
+# pool is exhausted, that is when it has exceeded the maximum memory
+# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.
+
+# buffer_pool_use_heap_if_exhausted: true
+
+# The strategy for optimizing disk read
+# Possible values are:
+# ssd (for solid state disks, the default)
+# spinning (for spinning disks)
+# disk_optimization_strategy: ssd
+
+# Total permitted memory to use for memtables. Cassandra will stop
+# accepting writes when the limit is exceeded until a flush completes,
+# and will trigger a flush based on memtable_cleanup_threshold
+# If omitted, Cassandra will set both to 1/4 the size of the heap.
+# memtable_heap_space_in_mb: 2048
+# memtable_offheap_space_in_mb: 2048
+
+# Ratio of occupied non-flushing memtable size to total permitted size
+# that will trigger a flush of the largest memtable. A larger threshold will
+# mean larger flushes and hence less compaction, but also less concurrent
+# flush activity which can make it difficult to keep your disks fed
+# under heavy write load.
+#
+# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
+# memtable_cleanup_threshold: 0.11
+
+# Specify the way Cassandra allocates and manages memtable memory.
+# Options are:
+#
+# heap_buffers
+# on heap nio buffers
+#
+# offheap_buffers
+# off heap (direct) nio buffers
+#
+# offheap_objects
+# off heap objects
+memtable_allocation_type: heap_buffers
+
+# Total space to use for commit logs on disk.
+#
+# If space gets above this value, Cassandra will flush every dirty CF
+# in the oldest segment and remove it. So a small total commitlog space
+# will tend to cause more flush activity on less-active columnfamilies.
+#
+# The default value is the smaller of 8192, and 1/4 of the total space
+# of the commitlog volume.
+#
+# commitlog_total_space_in_mb: 8192
+
+# This sets the number of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked.
+#
+# memtable_flush_writers defaults to one per data_file_directory.
+#
+# If your data directories are backed by SSD, you can increase this, but
+# avoid having memtable_flush_writers * data_file_directories > number of cores
+#memtable_flush_writers: 1
+
+# Total space to use for change-data-capture logs on disk.
+#
+# If space gets above this value, Cassandra will throw WriteTimeoutException
+# on Mutations including tables with CDC enabled. A CDCCompactor is responsible
+# for parsing the raw CDC logs and deleting them when parsing is completed.
+#
+# The default value is the min of 4096 mb and 1/8th of the total space
+# of the drive where cdc_raw_directory resides.
+# cdc_total_space_in_mb: 4096
+
+# When we hit our cdc_raw limit and the CDCCompactor is either running behind
+# or experiencing backpressure, we check at the following interval to see if any
+# new space for cdc-tracked tables has been made available. Default to 250ms
+# cdc_free_space_check_interval_ms: 250
+
+# A fixed memory pool size in MB for SSTable index summaries. If left
+# empty, this will default to 5% of the heap size. If the memory usage of
+# all index summaries exceeds this limit, SSTables with low read rates will
+# shrink their index summaries in order to meet this limit. However, this
+# is a best-effort process. In extreme conditions Cassandra may need to use
+# more than this amount of memory.
+index_summary_capacity_in_mb:
+
+# How frequently index summaries should be resampled. This is done
+# periodically to redistribute memory from the fixed-size pool to sstables
+# proportional to their recent read rates. Setting to -1 will disable this
+# process, leaving existing index summaries at their current sampling level.
+index_summary_resize_interval_in_minutes: 60
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSDs; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+ssl_storage_port: 7001
+
+# Address or interface to bind to and tell other Cassandra nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# Set listen_address OR listen_interface, not both.
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing _if_ the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting listen_address to 0.0.0.0 is always wrong.
+#
+listen_address: 172.17.0.3
+
+# Set listen_address OR listen_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+# listen_interface: eth0
+
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+# listen_interface_prefer_ipv6: false
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+broadcast_address: 172.17.0.3
+
+# When using multiple physical network interfaces, set this
+# to true to listen on broadcast_address in addition to
+# the listen_address, allowing nodes to communicate in both
+# interfaces.
+# Ignore this property if the network configuration automatically
+# routes between the public and private networks such as EC2.
+# listen_on_broadcast_address: false
+
+# Internode authentication backend, implementing IInternodeAuthenticator;
+# used to allow/disallow connections from peer nodes.
+# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+native_transport_port: 9042
+# Enabling native transport encryption in client_encryption_options allows you to either use
+# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
+# standard native_transport_port.
+# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
+# for native_transport_port. Setting native_transport_port_ssl to a different value
+# from native_transport_port will use encryption for native_transport_port_ssl while
+# keeping native_transport_port unencrypted.
+# native_transport_port_ssl: 9142
+# The maximum threads for handling requests when the native transport is used.
+# This is similar to rpc_max_threads though the default differs slightly (and
+# there is no native_transport_min_threads, idle threads will always be stopped
+# after 30 seconds).
+# native_transport_max_threads: 128
+#
+# The maximum allowed frame size. Frames (requests) larger than this will
+# be rejected as invalid. The default is 256MB. If you're changing this parameter,
+# you may want to adjust max_value_size_in_mb accordingly.
+# native_transport_max_frame_size_in_mb: 256
+
+# The maximum number of concurrent client connections.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections: -1
+
+# The maximum number of concurrent client connections per source ip.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections_per_ip: -1
+
+# Whether to start the thrift rpc server.
+start_rpc: false
+
+# The address or interface to bind the Thrift RPC service and native transport
+# server to.
+#
+# Set rpc_address OR rpc_interface, not both.
+#
+# Leaving rpc_address blank has the same effect as on listen_address
+# (i.e. it will be based on the configured hostname of the node).
+#
+# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
+# set broadcast_rpc_address to a value other than 0.0.0.0.
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+rpc_address: 0.0.0.0
+
+# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+# rpc_interface: eth1
+
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+# rpc_interface_prefer_ipv6: false
+
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
+# be set to 0.0.0.0. If left blank, this will be set to the value of
+# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+# be set.
+broadcast_rpc_address: 172.17.0.3
+
+# enable or disable keepalive on rpc/native connections
+rpc_keepalive: true
+
+# Cassandra provides two out-of-the-box options for the RPC Server:
+#
+# sync
+# One thread per thrift connection. For a very large number of clients, memory
+# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
+# per thread, and that will correspond to your use of virtual memory (but physical memory
+# may be limited depending on use of stack space).
+#
+# hsha
+# Stands for "half synchronous, half asynchronous." All thrift clients are handled
+# asynchronously using a small number of threads that does not vary with the number
+# of thrift clients (and thus scales well to many clients). The rpc requests are still
+# synchronous (one thread per active request). If hsha is selected then it is essential
+# that rpc_max_threads is changed from the default value of unlimited.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_threads to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it it is defined by net.ipv4.tcp_wmem
+# See also:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and 'man tcp'
+# internode_send_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it it is defined by net.ipv4.tcp_wmem
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum message length).
+thrift_framed_transport_size_in_mb: 15
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+#
+# - a smaller granularity means more index entries are generated
+# and looking up rows within the partition by collation column
+# is faster
+# - but, Cassandra will keep the collation index in memory for hot
+# rows (as part of the key cache), so a larger granularity means
+# you can cache more hot rows
+column_index_size_in_kb: 64
+
+# Per sstable indexed key cache entries (the collation index in memory
+# mentioned above) exceeding this size will not be held on heap.
+# This means that only partition information is held on heap and the
+# index entries are read from disk.
+#
+# Note that this size refers to the size of the
+# serialized index information and not the size of the partition.
+column_index_cache_size_in_kb: 2
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long-running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the smaller of (number of disks,
+# number of cores), with a minimum of 2 and a maximum of 8.
+#
+# If your data directories are backed by SSD, you should increase this
+# to the number of cores.
+#concurrent_compactors: 1
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# When compacting, the replacement sstable(s) can be opened before they
+# are completely written, and used in place of the prior sstables for
+# any range that has been written. This helps to smoothly transfer reads
+# between the sstables, reducing page cache churn and keeping hot rows hot
+sstable_preemptive_open_interval_in_mb: 50
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 200
+
+# Throttles all streaming file transfer between the datacenters,
+# this setting allows users to throttle inter dc stream throughput in addition
+# to throttling all network stream traffic as configured with
+# stream_throughput_outbound_megabits_per_sec
+# When unset, the default is 200 Mbps or 25 MB/s
+# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 5000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 2000
+# How long the coordinator should wait for counter writes to complete
+counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, replicas will assume that requests
+# were forwarded to them instantly by the coordinator, which means that
+# under overload conditions we will waste that much extra time processing
+# already-timed-out requests.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Set socket timeout for streaming operation.
+# The stream session is failed if no data/ack is received by any of the participants
+# within that period, which means this should also be sufficient to stream a large
+# sstable or rebuild table indexes.
+# Default value is 86400000ms, which means stale streams timeout after 24 hours.
+# A value of zero means stream sockets should never time out.
+# streaming_socket_timeout_in_ms: 86400000
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+#
+# - it teaches Cassandra enough about your network topology to route
+# requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Cassandra will do its best not to have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH
+# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss.
+# This means that if you start with the default SimpleSnitch, which
+# locates every node on "rack1" in "datacenter1", your only options
+# if you need to add another datacenter are GossipingPropertyFileSnitch
+# (and the older PFS). From there, if you want to migrate to an
+# incompatible snitch like Ec2Snitch you can do it by adding new nodes
+# under Ec2Snitch (which will locate them in a new "datacenter") and
+# decommissioning the old ones.
+#
+# Out of the box, Cassandra provides:
+#
+# SimpleSnitch:
+# Treats Strategy order as proximity. This can improve cache
+# locality when disabling read repair. Only appropriate for
+# single-datacenter deployments.
+#
+# GossipingPropertyFileSnitch:
+# This should be your go-to snitch for production use. The rack
+# and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via
+# gossip. If cassandra-topology.properties exists, it is used as a
+# fallback, allowing migration from the PropertyFileSnitch.
+#
+# PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+#
+# Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+#
+# Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Cassandra will switch to the private IP after
+# establishing a connection.)
+#
+# RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's IP
+# address, respectively. Unless this happens to match your
+# deployment conventions, this is best used as an example of
+# writing a custom Snitch class and is provided in that spirit.
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
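+
+# For example, with GossipingPropertyFileSnitch each node reads its own
+# location from conf/cassandra-rackdc.properties; a minimal sketch
+# (dc/rack names are illustrative):
+#
+#   dc=dc1
+#   rack=rack1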
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+#
+# NoScheduler
+# Has no options
+#
+# RoundRobin
+# throttle_limit
+# The throttle_limit is the number of in-flight
+# requests per client. Requests beyond
+# that limit are queued up until
+# running requests can complete.
+# The value of 80 here is twice the number of
+# concurrent_reads + concurrent_writes.
+# default_weight
+# default_weight is optional and allows for
+# overriding the default which is 1.
+# weights
+# Weights are optional and will default to 1 or the
+# overridden default_weight. The weight translates into how
+# many requests are handled during each turn of the
+# RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+# throttle_limit: 80
+# default_weight: 5
+# weights:
+# Keyspace1: 1
+# Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# Enable or disable inter-node encryption
+# JVM defaults for supported SSL socket protocols and cipher suites can
+# be replaced using custom encryption options. This is not recommended
+# unless you have policies in place that dictate certain settings, or
+# need to disable vulnerable ciphers or protocols in case the JVM cannot
+# be updated.
+# FIPS compliant settings can be configured at JVM level and should not
+# involve changing encryption settings here:
+# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html
+# *NOTE* No custom encryption options are enabled at the moment
+# The available internode options are: all, none, dc, rack
+#
+# If set to dc, Cassandra will encrypt the traffic between the DCs
+# If set to rack, Cassandra will encrypt the traffic between the racks
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+server_encryption_options:
+ internode_encryption: none
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ truststore: conf/.truststore
+ truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+ # require_client_auth: false
+ # require_endpoint_verification: false
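+ # A minimal sketch of creating the keystore/truststore pair referenced above
+ # with the JDK keytool (alias, passwords and validity are illustrative):
+ #
+ #   keytool -genkeypair -keyalg RSA -alias node1 -validity 365 \
+ #     -keystore conf/.keystore -storepass cassandra
+ #   keytool -exportcert -alias node1 -keystore conf/.keystore \
+ #     -storepass cassandra -file node1.cer
+ #   keytool -importcert -alias node1 -file node1.cer \
+ #     -keystore conf/.truststore -storepass cassandra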
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: false
+ # If enabled and optional is set to true, both encrypted and unencrypted connections are handled.
+ optional: false
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ # require_client_auth: false
+ # Set truststore and truststore_password if require_client_auth is true
+ # truststore: conf/.truststore
+ # truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# Can be:
+#
+# all
+# all traffic is compressed
+#
+# dc
+# traffic between different datacenters is compressed
+#
+# none
+# nothing is compressed.
+internode_compression: dc
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: false
+
+# TTL for different trace types used during logging of the repair process.
+tracetype_query_ttl: 86400
+tracetype_repair_ttl: 604800
+
+# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
+# This threshold can be adjusted to minimize logging if necessary
+# gc_log_threshold_in_ms: 200
+
+# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at
+# INFO level.
+
+# UDFs (user defined functions) are disabled by default.
+# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
+enable_user_defined_functions: false
+
+# Enables scripted UDFs (JavaScript UDFs).
+# Java UDFs are always enabled, if enable_user_defined_functions is true.
+# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
+# This option has no effect, if enable_user_defined_functions is false.
+enable_scripted_user_defined_functions: false
+
+# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
+# Lowering this value on Windows can provide much tighter latency and better throughput, however
+# some virtualized environments may see a negative performance impact from changing this setting
+# below their system default. The sysinternals 'clockres' tool can confirm your system's default
+# setting.
+windows_timer_interval: 1
+
+
+# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
+# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
+# the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys
+# can still (and should!) be in the keystore and will be used on decrypt operations
+# (to handle the case of key rotation).
+#
+# It is strongly recommended to download and install Java Cryptography Extension (JCE)
+# Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
+# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
+#
+# Currently, only the following file types are supported for transparent data encryption, although
+# more are coming in future cassandra releases: commitlog, hints
+transparent_data_encryption_options:
+ enabled: false
+ chunk_length_kb: 64
+ cipher: AES/CBC/PKCS5Padding
+ key_alias: testing:1
+ # CBC IV length for AES needs to be 16 bytes (which is also the default size)
+ # iv_length: 16
+ key_provider:
+ - class_name: org.apache.cassandra.security.JKSKeyProvider
+ parameters:
+ - keystore: conf/.keystore
+ keystore_password: cassandra
+ store_type: JCEKS
+ key_password: cassandra
+
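+# A minimal sketch of creating the JCEKS secret key referenced by key_alias
+# above with the JDK keytool (passwords and key size are illustrative):
+#
+#   keytool -genseckey -keyalg AES -keysize 128 -alias testing:1 \
+#     -keystore conf/.keystore -storetype JCEKS -storepass cassandra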
+
+#####################
+# SAFETY THRESHOLDS #
+#####################
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+tombstone_warn_threshold: 1000
+tombstone_failure_threshold: 100000
+
+# Log WARN on any batch size exceeding this value. 5kb per batch by default.
+# Caution should be taken when increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 5
+
+# Fail any batch exceeding this value. 50kb (10x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 50
+
+# Log WARN on any batches not of type LOGGED that span more partitions than this limit
+unlogged_batch_across_partitions_warn_threshold: 10
+
+# Log a warning when compacting partitions larger than this value
+compaction_large_partition_warning_threshold_mb: 100
+
+# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
+# Adjust the threshold based on your application throughput requirement
+# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
+gc_warn_threshold_in_ms: 1000
+
+# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
+# early. Any value size larger than this threshold will result in marking an SSTable
+# as corrupted.
+# max_value_size_in_mb: 256
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/util.go
new file mode 100644
index 0000000..ef3d1ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/util.go
@@ -0,0 +1,95 @@
+package cassandra
+
+import (
+ "crypto/tls"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/gocql/gocql"
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/helper/tlsutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+// substQuery templates a query, replacing each {{key}} placeholder in tpl with
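+// the corresponding value from data. For example (illustrative values):
+//
+//   substQuery("CREATE USER '{{username}}'", map[string]string{"username": "bob"})
+//   // yields: CREATE USER 'bob'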
+func substQuery(tpl string, data map[string]string) string {
+ for k, v := range data {
+ tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
+ }
+
+ return tpl
+}
+
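+// createSession dials the configured Cassandra hosts, optionally with TLS,
+// and verifies connectivity by running LIST USERS before returning.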
+func createSession(cfg *sessionConfig, s logical.Storage) (*gocql.Session, error) {
+ clusterConfig := gocql.NewCluster(strings.Split(cfg.Hosts, ",")...)
+ clusterConfig.Authenticator = gocql.PasswordAuthenticator{
+ Username: cfg.Username,
+ Password: cfg.Password,
+ }
+
+ clusterConfig.ProtoVersion = cfg.ProtocolVersion
+ if clusterConfig.ProtoVersion == 0 {
+ clusterConfig.ProtoVersion = 2
+ }
+
+ clusterConfig.Timeout = time.Duration(cfg.ConnectTimeout) * time.Second
+
+ if cfg.TLS {
+ var tlsConfig *tls.Config
+ if len(cfg.Certificate) > 0 || len(cfg.IssuingCA) > 0 {
+ if len(cfg.Certificate) > 0 && len(cfg.PrivateKey) == 0 {
+ return nil, fmt.Errorf("Found certificate for TLS authentication but no private key")
+ }
+
+ certBundle := &certutil.CertBundle{}
+ if len(cfg.Certificate) > 0 {
+ certBundle.Certificate = cfg.Certificate
+ certBundle.PrivateKey = cfg.PrivateKey
+ }
+ if len(cfg.IssuingCA) > 0 {
+ certBundle.IssuingCA = cfg.IssuingCA
+ }
+
+ parsedCertBundle, err := certBundle.ToParsedCertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate bundle: %s", err)
+ }
+
+ tlsConfig, err = parsedCertBundle.GetTLSConfig(certutil.TLSClient)
+ if err != nil || tlsConfig == nil {
+ return nil, fmt.Errorf("failed to get TLS configuration: tlsConfig:%#v err:%v", tlsConfig, err)
+ }
+ tlsConfig.InsecureSkipVerify = cfg.InsecureTLS
+
+ if cfg.TLSMinVersion != "" {
+ var ok bool
+ tlsConfig.MinVersion, ok = tlsutil.TLSLookup[cfg.TLSMinVersion]
+ if !ok {
+ return nil, fmt.Errorf("invalid 'tls_min_version' in config")
+ }
+ } else {
+ // MinVersion was not being set earlier. Reset it to
+ // zero to gracefully handle upgrades.
+ tlsConfig.MinVersion = 0
+ }
+ }
+
+ clusterConfig.SslOpts = &gocql.SslOptions{
+ Config: tlsConfig,
+ }
+ }
+
+ session, err := clusterConfig.CreateSession()
+ if err != nil {
+ return nil, fmt.Errorf("Error creating session: %s", err)
+ }
+
+ // Verify the info
+ err = session.Query(`LIST USERS`).Exec()
+ if err != nil {
+ return nil, fmt.Errorf("Error validating connection info: %s", err)
+ }
+
+ return session, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go
new file mode 100644
index 0000000..0b4351f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go
@@ -0,0 +1,32 @@
+package consul
+
+import (
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
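+// Factory constructs the Consul secrets backend, which exposes the
+// config/access, roles/, and creds/ paths wired up in Backend below.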
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Paths: []*framework.Path{
+ pathConfigAccess(),
+ pathListRoles(&b),
+ pathRoles(),
+ pathToken(&b),
+ },
+
+ Secrets: []*framework.Secret{
+ secretToken(&b),
+ },
+ }
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend_test.go
new file mode 100644
index 0000000..b242657
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend_test.go
@@ -0,0 +1,474 @@
+package consul
+
+import (
+ "encoding/base64"
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sync"
+ "testing"
+ "time"
+
+ consulapi "github.com/hashicorp/consul/api"
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/mitchellh/mapstructure"
+ dockertest "gopkg.in/ory-am/dockertest.v2"
+)
+
+var (
+ testImagePull sync.Once
+)
+
+func prepareTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cid dockertest.ContainerID, retAddress string) {
+ if os.Getenv("CONSUL_ADDR") != "" {
+ return "", os.Getenv("CONSUL_ADDR")
+ }
+
+ // Without this the checks for whether the container has started seem to
+ // never actually pass. There's really no reason to expose the test
+ // containers, so don't.
+ dockertest.BindDockerToLocalhost = "yep"
+
+ testImagePull.Do(func() {
+ dockertest.Pull(dockertest.ConsulImageName)
+ })
+
+ try := 0
+ cid, connErr := dockertest.ConnectToConsul(60, 500*time.Millisecond, func(connAddress string) bool {
+ try += 1
+ // Build a client and verify that the credentials work
+ config := consulapi.DefaultConfig()
+ config.Address = connAddress
+ config.Token = dockertest.ConsulACLMasterToken
+ client, err := consulapi.NewClient(config)
+ if err != nil {
+ if try > 50 {
+ panic(err)
+ }
+ return false
+ }
+
+ _, err = client.KV().Put(&consulapi.KVPair{
+ Key: "setuptest",
+ Value: []byte("setuptest"),
+ }, nil)
+ if err != nil {
+ if try > 50 {
+ panic(err)
+ }
+ return false
+ }
+
+ retAddress = connAddress
+ return true
+ })
+
+ if connErr != nil {
+ t.Fatalf("could not connect to consul: %v", connErr)
+ }
+
+ return
+}
+
+func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
+ err := cid.KillRemove()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackend_config_access(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "address": connURL,
+ "token": dockertest.ConsulACLMasterToken,
+ }
+
+ confReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/access",
+ Storage: config.StorageView,
+ Data: connData,
+ }
+
+ resp, err := b.HandleRequest(confReq)
+ if err != nil || resp != nil { // the write must succeed and return no response body
+ t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err)
+ }
+
+ confReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(confReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err)
+ }
+
+ expected := map[string]interface{}{
+ "address": connData["address"].(string),
+ "scheme": "http",
+ }
+ if !reflect.DeepEqual(expected, resp.Data) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data)
+ }
+ if resp.Data["token"] != nil {
+ t.Fatalf("token should not be set in the response")
+ }
+}
+
+func TestBackend_basic(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "address": connURL,
+ "token": dockertest.ConsulACLMasterToken,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, connData),
+ testAccStepWritePolicy(t, "test", testPolicy, ""),
+ testAccStepReadToken(t, "test", connData),
+ },
+ })
+}
+
+func TestBackend_renew_revoke(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "address": connURL,
+ "token": dockertest.ConsulACLMasterToken,
+ }
+
+ req := &logical.Request{
+ Storage: config.StorageView,
+ Operation: logical.UpdateOperation,
+ Path: "config/access",
+ Data: connData,
+ }
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req.Path = "roles/test"
+ req.Data = map[string]interface{}{
+ "policy": base64.StdEncoding.EncodeToString([]byte(testPolicy)),
+ "lease": "6h",
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Path = "creds/test"
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("resp nil")
+ }
+ if resp.IsError() {
+ t.Fatalf("resp is error: %v", resp.Error())
+ }
+
+ generatedSecret := resp.Secret
+ generatedSecret.IssueTime = time.Now()
+ generatedSecret.TTL = 6 * time.Hour
+
+ var d struct {
+ Token string `mapstructure:"token"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ t.Fatal(err)
+ }
+ log.Printf("[WARN] Generated token: %s", d.Token)
+
+ // Build a client and verify that the credentials work
+ consulapiConfig := consulapi.DefaultConfig()
+ consulapiConfig.Address = connData["address"].(string)
+ consulapiConfig.Token = d.Token
+ client, err := consulapi.NewClient(consulapiConfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ log.Printf("[WARN] Verifying that the generated token works...")
+ _, err = client.KV().Put(&consulapi.KVPair{
+ Key: "foo",
+ Value: []byte("bar"),
+ }, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req.Operation = logical.RenewOperation
+ req.Secret = generatedSecret
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("got nil response from renew")
+ }
+
+ req.Operation = logical.RevokeOperation
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ log.Printf("[WARN] Verifying that the generated token does not work...")
+ _, err = client.KV().Put(&consulapi.KVPair{
+ Key: "foo",
+ Value: []byte("bar"),
+ }, nil)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+func TestBackend_management(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "address": connURL,
+ "token": dockertest.ConsulACLMasterToken,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, connData),
+ testAccStepWriteManagementPolicy(t, "test", ""),
+ testAccStepReadManagementToken(t, "test", connData),
+ },
+ })
+}
+
+func TestBackend_crud(t *testing.T) {
+ b, _ := Factory(logical.TestBackendConfig())
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepWritePolicy(t, "test", testPolicy, ""),
+ testAccStepWritePolicy(t, "test2", testPolicy, ""),
+ testAccStepWritePolicy(t, "test3", testPolicy, ""),
+ testAccStepReadPolicy(t, "test", testPolicy, 0),
+ testAccStepListPolicy(t, []string{"test", "test2", "test3"}),
+ testAccStepDeletePolicy(t, "test"),
+ },
+ })
+}
+
+func TestBackend_role_lease(t *testing.T) {
+ b, _ := Factory(logical.TestBackendConfig())
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepWritePolicy(t, "test", testPolicy, "6h"),
+ testAccStepReadPolicy(t, "test", testPolicy, 6*time.Hour),
+ testAccStepDeletePolicy(t, "test"),
+ },
+ })
+}
+
+func testAccStepConfig(
+ t *testing.T, config map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/access",
+ Data: config,
+ }
+}
+
+func testAccStepReadToken(
+ t *testing.T, name string, conf map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "creds/" + name,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Token string `mapstructure:"token"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ log.Printf("[WARN] Generated token: %s", d.Token)
+
+ // Build a client and verify that the credentials work
+ config := consulapi.DefaultConfig()
+ config.Address = conf["address"].(string)
+ config.Token = d.Token
+ client, err := consulapi.NewClient(config)
+ if err != nil {
+ return err
+ }
+
+ log.Printf("[WARN] Verifying that the generated token works...")
+ _, err = client.KV().Put(&consulapi.KVPair{
+ Key: "foo",
+ Value: []byte("bar"),
+ }, nil)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepReadManagementToken(
+ t *testing.T, name string, conf map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "creds/" + name,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Token string `mapstructure:"token"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ log.Printf("[WARN] Generated token: %s", d.Token)
+
+ // Build a client and verify that the credentials work
+ config := consulapi.DefaultConfig()
+ config.Address = conf["address"].(string)
+ config.Token = d.Token
+ client, err := consulapi.NewClient(config)
+ if err != nil {
+ return err
+ }
+
+ log.Printf("[WARN] Verifying that the generated token works...")
+ _, _, err = client.ACL().Create(&consulapi.ACLEntry{
+ Type: "management",
+ Name: "test2",
+ }, nil)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepWritePolicy(t *testing.T, name string, policy string, lease string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/" + name,
+ Data: map[string]interface{}{
+ "policy": base64.StdEncoding.EncodeToString([]byte(policy)),
+ "lease": lease,
+ },
+ }
+}
+
+func testAccStepWriteManagementPolicy(t *testing.T, name string, lease string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/" + name,
+ Data: map[string]interface{}{
+ "token_type": "management",
+ "lease": lease,
+ },
+ }
+}
+
+func testAccStepReadPolicy(t *testing.T, name string, policy string, lease time.Duration) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "roles/" + name,
+ Check: func(resp *logical.Response) error {
+ policyRaw := resp.Data["policy"].(string)
+ out, err := base64.StdEncoding.DecodeString(policyRaw)
+ if err != nil {
+ return err
+ }
+ if string(out) != policy {
+ return fmt.Errorf("mismatch: %s %s", out, policy)
+ }
+
+ leaseRaw := resp.Data["lease"].(string)
+ l, err := time.ParseDuration(leaseRaw)
+ if err != nil {
+ return err
+ }
+ if l != lease {
+ return fmt.Errorf("mismatch: %v %v", l, lease)
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepListPolicy(t *testing.T, names []string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ListOperation,
+ Path: "roles/",
+ Check: func(resp *logical.Response) error {
+ respKeys := resp.Data["keys"].([]string)
+ if !reflect.DeepEqual(respKeys, names) {
+ return fmt.Errorf("mismatch: %#v %#v", respKeys, names)
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepDeletePolicy(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "roles/" + name,
+ }
+}
+
+const testPolicy = `
+key "" {
+ policy = "write"
+}
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/client.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/client.go
new file mode 100644
index 0000000..d519a88
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/consul/client.go
@@ -0,0 +1,29 @@
+package consul
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/consul/api"
+ "github.com/hashicorp/vault/logical"
+)
+
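+// client builds a Consul API client from the stored access configuration.
+// It returns (client, user error, internal error) so callers can distinguish
+// a missing or invalid configuration from backend failures.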
+func client(s logical.Storage) (*api.Client, error, error) {
+ conf, userErr, intErr := readConfigAccess(s)
+ if intErr != nil {
+ return nil, nil, intErr
+ }
+ if userErr != nil {
+ return nil, userErr, nil
+ }
+ if conf == nil {
+ return nil, nil, fmt.Errorf("no error received but no configuration found")
+ }
+
+ consulConf := api.DefaultNonPooledConfig()
+ consulConf.Address = conf.Address
+ consulConf.Scheme = conf.Scheme
+ consulConf.Token = conf.Token
+
+ client, err := api.NewClient(consulConf)
+ return client, nil, err
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_config.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_config.go
new file mode 100644
index 0000000..09f0f3f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_config.go
@@ -0,0 +1,104 @@
+package consul
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigAccess() *framework.Path {
+ return &framework.Path{
+ Pattern: "config/access",
+ Fields: map[string]*framework.FieldSchema{
+ "address": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Consul server address",
+ },
+
+ "scheme": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "URI scheme for the Consul address",
+
+ // https would be a better default but Consul on its own
+ // defaults to HTTP access, and when HTTPS is enabled it
+ // disables HTTP, so there isn't really any harm done here.
+ Default: "http",
+ },
+
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token for API calls",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: pathConfigAccessRead,
+ logical.UpdateOperation: pathConfigAccessWrite,
+ },
+ }
+}
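+
+// Assuming the backend is mounted at "consul/", access configuration is
+// typically written via, e.g. (illustrative values):
+//
+//   vault write consul/config/access address=127.0.0.1:8500 token=<management-token>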
+
+func readConfigAccess(storage logical.Storage) (*accessConfig, error, error) {
+ entry, err := storage.Get("config/access")
+ if err != nil {
+ return nil, nil, err
+ }
+ if entry == nil {
+ return nil, fmt.Errorf(
+ "Access credentials for the backend itself haven't been configured. Please configure them at the '/config/access' endpoint"),
+ nil
+ }
+
+ conf := &accessConfig{}
+ if err := entry.DecodeJSON(conf); err != nil {
+ return nil, nil, fmt.Errorf("error reading consul access configuration: %s", err)
+ }
+
+ return conf, nil, nil
+}
+
+func pathConfigAccessRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ conf, userErr, intErr := readConfigAccess(req.Storage)
+ if intErr != nil {
+ return nil, intErr
+ }
+ if userErr != nil {
+ return logical.ErrorResponse(userErr.Error()), nil
+ }
+ if conf == nil {
+ return nil, fmt.Errorf("no user error reported but consul access configuration not found")
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "address": conf.Address,
+ "scheme": conf.Scheme,
+ },
+ }, nil
+}
+
+func pathConfigAccessWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ entry, err := logical.StorageEntryJSON("config/access", accessConfig{
+ Address: data.Get("address").(string),
+ Scheme: data.Get("scheme").(string),
+ Token: data.Get("token").(string),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+type accessConfig struct {
+ Address string `json:"address"`
+ Scheme string `json:"scheme"`
+ Token string `json:"token"`
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_roles.go
new file mode 100644
index 0000000..9b4087b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_roles.go
@@ -0,0 +1,171 @@
+package consul
+
+import (
+ "encoding/base64"
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathListRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList,
+ },
+ }
+}
+
+func pathRoles() *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role",
+ },
+
+ "policy": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Policy document, base64 encoded. Required
+for 'client' tokens.`,
+ },
+
+ "token_type": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "client",
+ Description: `Which type of token to create: 'client'
+or 'management'. If a 'management' token,
+the "policy" parameter is not required.
+Defaults to 'client'.`,
+ },
+
+ "lease": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Lease time of the role.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: pathRolesRead,
+ logical.UpdateOperation: pathRolesWrite,
+ logical.DeleteOperation: pathRolesDelete,
+ },
+ }
+}
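+
+// With the backend mounted at "consul/", a client role is typically written
+// with its ACL policy base64-encoded, e.g. (illustrative):
+//
+//   POLICY=$(echo -n 'key "" { policy = "read" }' | base64)
+//   vault write consul/roles/readonly policy="$POLICY" lease=1h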
+
+func (b *backend) pathRoleList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entries, err := req.Storage.List("policy/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(entries), nil
+}
+
+func pathRolesRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+
+ entry, err := req.Storage.Get("policy/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result roleConfig
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ if result.TokenType == "" {
+ result.TokenType = "client"
+ }
+
+ // Generate the response
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "lease": result.Lease.String(),
+ "token_type": result.TokenType,
+ },
+ }
+ if result.Policy != "" {
+ resp.Data["policy"] = base64.StdEncoding.EncodeToString([]byte(result.Policy))
+ }
+ return resp, nil
+}
+
+func pathRolesWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ tokenType := d.Get("token_type").(string)
+
+ switch tokenType {
+ case "client":
+ case "management":
+ default:
+ return logical.ErrorResponse(
+ "token_type must be \"client\" or \"management\""), nil
+ }
+
+ name := d.Get("name").(string)
+ policy := d.Get("policy").(string)
+ var policyRaw []byte
+ var err error
+ if tokenType != "management" {
+ if policy == "" {
+ return logical.ErrorResponse(
+ "policy cannot be empty when not using management tokens"), nil
+ }
+ policyRaw, err = base64.StdEncoding.DecodeString(d.Get("policy").(string))
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error decoding policy base64: %s", err)), nil
+ }
+ }
+
+ var lease time.Duration
+ leaseParam := d.Get("lease").(string)
+ if leaseParam != "" {
+ lease, err = time.ParseDuration(leaseParam)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "error parsing given lease of %s: %s", leaseParam, err)), nil
+ }
+ }
+
+ entry, err := logical.StorageEntryJSON("policy/"+name, roleConfig{
+ Policy: string(policyRaw),
+ Lease: lease,
+ TokenType: tokenType,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func pathRolesDelete(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ if err := req.Storage.Delete("policy/" + name); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+type roleConfig struct {
+ Policy string `json:"policy"`
+ Lease time.Duration `json:"lease"`
+ TokenType string `json:"token_type"`
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_token.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_token.go
new file mode 100644
index 0000000..bce276d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_token.go
@@ -0,0 +1,80 @@
+package consul
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/consul/api"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathToken(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "creds/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathTokenRead,
+ },
+ }
+}
+
+func (b *backend) pathTokenRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+
+ entry, err := req.Storage.Get("policy/" + name)
+ if err != nil {
+ return nil, fmt.Errorf("error retrieving role: %s", err)
+ }
+ if entry == nil {
+ return logical.ErrorResponse(fmt.Sprintf("Role '%s' not found", name)), nil
+ }
+
+ var result roleConfig
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ if result.TokenType == "" {
+ result.TokenType = "client"
+ }
+
+ // Get the consul client
+ c, userErr, intErr := client(req.Storage)
+ if intErr != nil {
+ return nil, intErr
+ }
+ if userErr != nil {
+ return logical.ErrorResponse(userErr.Error()), nil
+ }
+
+ // Generate a name for the token
+ tokenName := fmt.Sprintf("Vault %s %s %d", name, req.DisplayName, time.Now().UnixNano())
+
+ // Create it
+ token, _, err := c.ACL().Create(&api.ACLEntry{
+ Name: tokenName,
+ Type: result.TokenType,
+ Rules: result.Policy,
+ }, nil)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ // Use the helper to create the secret
+ s := b.Secret(SecretTokenType).Response(map[string]interface{}{
+ "token": token,
+ }, map[string]interface{}{
+ "token": token,
+ })
+ s.Secret.TTL = result.Lease
+
+ return s, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/secret_token.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/secret_token.go
new file mode 100644
index 0000000..3388946
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/consul/secret_token.go
@@ -0,0 +1,59 @@
+package consul
+
+import (
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const (
+ SecretTokenType = "token"
+)
+
+func secretToken(b *backend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretTokenType,
+ Fields: map[string]*framework.FieldSchema{
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Request token",
+ },
+ },
+
+ Renew: b.secretTokenRenew,
+ Revoke: secretTokenRevoke,
+ }
+}
+
+func (b *backend) secretTokenRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ return framework.LeaseExtend(0, 0, b.System())(req, d)
+}
+
+func secretTokenRevoke(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ c, userErr, intErr := client(req.Storage)
+ if intErr != nil {
+ return nil, intErr
+ }
+ if userErr != nil {
+ // Returning logical.ErrorResponse from revocation function is risky
+ return nil, userErr
+ }
+
+ tokenRaw, ok := req.Secret.InternalData["token"]
+ if !ok {
+ // We return nil here because this is a pre-0.5.3 problem and there is
+ // nothing we can do about it. We already can't revoke the lease
+ // properly if it has been renewed and this is documented pre-0.5.3
+ // behavior with a security bulletin about it.
+ return nil, nil
+ }
+
+ _, err := c.ACL().Destroy(tokenRaw.(string), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go
new file mode 100644
index 0000000..e9f3b29
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go
@@ -0,0 +1,132 @@
+package mongodb
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "gopkg.in/mgo.v2"
+)
+
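+// Factory constructs the mongodb secrets backend, which manages a shared
+// mgo session and exposes the config, roles, and creds paths below.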
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+func Backend() *framework.Backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(backendHelp),
+
+ Paths: []*framework.Path{
+ pathConfigConnection(&b),
+ pathConfigLease(&b),
+ pathListRoles(&b),
+ pathRoles(&b),
+ pathCredsCreate(&b),
+ },
+
+ Secrets: []*framework.Secret{
+ secretCreds(&b),
+ },
+
+ Clean: b.ResetSession,
+
+ Invalidate: b.invalidate,
+ }
+
+ return b.Backend
+}
+
+type backend struct {
+ *framework.Backend
+
+ session *mgo.Session
+ lock sync.Mutex
+}
+
+// Session returns a live database session, reusing the cached one when it
+// still responds to Ping and re-dialing from config/connection otherwise.
+func (b *backend) Session(s logical.Storage) (*mgo.Session, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.session != nil {
+ if err := b.session.Ping(); err == nil {
+ return b.session, nil
+ }
+ b.session.Close()
+ }
+
+ connConfigJSON, err := s.Get("config/connection")
+ if err != nil {
+ return nil, err
+ }
+ if connConfigJSON == nil {
+ return nil, fmt.Errorf("configure the MongoDB connection with config/connection first")
+ }
+
+ var connConfig connectionConfig
+ if err := connConfigJSON.DecodeJSON(&connConfig); err != nil {
+ return nil, err
+ }
+
+ dialInfo, err := parseMongoURI(connConfig.URI)
+ if err != nil {
+ return nil, err
+ }
+
+ b.session, err = mgo.DialWithInfo(dialInfo)
+ if err != nil {
+ return nil, err
+ }
+ b.session.SetSyncTimeout(1 * time.Minute)
+ b.session.SetSocketTimeout(1 * time.Minute)
+
+ return b.session, nil
+}
+
+// ResetSession forces creation of a new connection next time Session() is called.
+func (b *backend) ResetSession() {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.session != nil {
+ b.session.Close()
+ }
+
+ b.session = nil
+}
+
+func (b *backend) invalidate(key string) {
+ switch key {
+ case "config/connection":
+ b.ResetSession()
+ }
+}
+
+// LeaseConfig returns the lease configuration
+func (b *backend) LeaseConfig(s logical.Storage) (*configLease, error) {
+ entry, err := s.Get("config/lease")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result configLease
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+const backendHelp = `
+The mongodb backend dynamically generates MongoDB credentials.
+
+After mounting this backend, configure it using the endpoints within
+the "config/" path.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend_test.go
new file mode 100644
index 0000000..dcd4b9f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend_test.go
@@ -0,0 +1,331 @@
+package mongodb
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/mitchellh/mapstructure"
+ dockertest "gopkg.in/ory-am/dockertest.v2"
+)
+
+var (
+ testImagePull sync.Once
+)
+
+func prepareTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cid dockertest.ContainerID, retURI string) {
+ if os.Getenv("MONGODB_URI") != "" {
+ return "", os.Getenv("MONGODB_URI")
+ }
+
+ // Without this the checks for whether the container has started seem to
+ // never actually pass. There's really no reason to expose the test
+ // containers, so don't.
+ dockertest.BindDockerToLocalhost = "yep"
+
+ testImagePull.Do(func() {
+ dockertest.Pull(dockertest.MongoDBImageName)
+ })
+
+ cid, connErr := dockertest.ConnectToMongoDB(60, 500*time.Millisecond, func(connURI string) bool {
+ connURI = "mongodb://" + connURI
+ // This will cause a validation to run
+ resp, err := b.HandleRequest(&logical.Request{
+ Storage: s,
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Data: map[string]interface{}{
+ "uri": connURI,
+ },
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ // It's likely not up and running yet, so return false and try again
+ return false
+ }
+ if resp == nil {
+ t.Fatal("expected warning")
+ }
+
+ retURI = connURI
+ return true
+ })
+
+ if connErr != nil {
+ t.Fatalf("could not connect to database: %v", connErr)
+ }
+
+ return
+}
+
+func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
+ err := cid.KillRemove()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackend_config_connection(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ configData := map[string]interface{}{
+ "uri": "sample_connection_uri",
+ "verify_connection": false,
+ }
+
+ configReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Storage: config.StorageView,
+ Data: configData,
+ }
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ configReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ if resp.Data["uri"] != configData["uri"] {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestBackend_basic(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURI := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "uri": connURI,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(connData, false),
+ testAccStepRole(),
+ testAccStepReadCreds("web"),
+ },
+ })
+}
+
+func TestBackend_roleCrud(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURI := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "uri": connURI,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(connData, false),
+ testAccStepRole(),
+ testAccStepReadRole("web", testDb, testMongoDBRoles),
+ testAccStepDeleteRole("web"),
+ testAccStepReadRole("web", "", ""),
+ },
+ })
+}
+
+func TestBackend_leaseWriteRead(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURI := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "uri": connURI,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(connData, false),
+ testAccStepWriteLease(),
+ testAccStepReadLease(),
+ },
+ })
+
+}
+
+func testAccStepConfig(d map[string]interface{}, expectError bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Data: d,
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if expectError {
+ if resp.Data == nil {
+ return fmt.Errorf("data is nil")
+ }
+ var e struct {
+ Error string `mapstructure:"error"`
+ }
+ if err := mapstructure.Decode(resp.Data, &e); err != nil {
+ return err
+ }
+ if len(e.Error) == 0 {
+ return fmt.Errorf("expected error, but write succeeded.")
+ }
+ return nil
+ } else if resp != nil && resp.IsError() {
+ return fmt.Errorf("got an error response: %v", resp.Error())
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepRole() logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/web",
+ Data: map[string]interface{}{
+ "db": testDb,
+ "roles": testMongoDBRoles,
+ },
+ }
+}
+
+func testAccStepDeleteRole(n string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "roles/" + n,
+ }
+}
+
+func testAccStepReadCreds(name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "creds/" + name,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ DB string `mapstructure:"db"`
+ Username string `mapstructure:"username"`
+ Password string `mapstructure:"password"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.DB == "" {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+ if d.Username == "" {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+ if !strings.HasPrefix(d.Username, "vault-root-") {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+ if d.Password == "" {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ log.Printf("[WARN] Generated credentials: %v", d)
+
+ return nil
+ },
+ }
+}
+
+func testAccStepReadRole(name, db, mongoDBRoles string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "roles/" + name,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if db == "" && mongoDBRoles == "" {
+ return nil
+ }
+
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var d struct {
+ DB string `mapstructure:"db"`
+ MongoDBRoles string `mapstructure:"roles"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.DB != db {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+ if d.MongoDBRoles != mongoDBRoles {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepWriteLease() logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/lease",
+ Data: map[string]interface{}{
+ "ttl": "1h5m",
+ "max_ttl": "24h",
+ },
+ }
+}
+
+func testAccStepReadLease() logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "config/lease",
+ Check: func(resp *logical.Response) error {
+ if resp.Data["ttl"].(float64) != 3900 || resp.Data["max_ttl"].(float64) != 86400 {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ return nil
+ },
+ }
+}
+
+const testDb = "foo"
+const testMongoDBRoles = `["readWrite",{"role":"read","db":"bar"}]`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_config_connection.go
new file mode 100644
index 0000000..9f615f9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_config_connection.go
@@ -0,0 +1,118 @@
+package mongodb
+
+import (
+ "fmt"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "gopkg.in/mgo.v2"
+)
+
+func pathConfigConnection(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/connection",
+ Fields: map[string]*framework.FieldSchema{
+ "uri": {
+ Type: framework.TypeString,
+ Description: "MongoDB standard connection string (URI)",
+ },
+ "verify_connection": {
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `If set, uri is verified by actually connecting to the database`,
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathConnectionRead,
+ logical.UpdateOperation: b.pathConnectionWrite,
+ },
+ HelpSynopsis: pathConfigConnectionHelpSyn,
+ HelpDescription: pathConfigConnectionHelpDesc,
+ }
+}
+
+// pathConnectionRead reads out the connection configuration
+func (b *backend) pathConnectionRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ entry, err := req.Storage.Get("config/connection")
+ if err != nil {
+ return nil, fmt.Errorf("failed to read connection configuration: %s", err)
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var config connectionConfig
+ if err := entry.DecodeJSON(&config); err != nil {
+ return nil, err
+ }
+ return &logical.Response{
+ Data: structs.New(config).Map(),
+ }, nil
+}
+
+func (b *backend) pathConnectionWrite(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ uri := data.Get("uri").(string)
+ if uri == "" {
+ return logical.ErrorResponse("uri parameter is required"), nil
+ }
+
+ dialInfo, err := parseMongoURI(uri)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("invalid uri: %s", err)), nil
+ }
+
+ // Don't check the config if verification is disabled
+ verifyConnection := data.Get("verify_connection").(bool)
+ if verifyConnection {
+ // Verify the config
+ session, err := mgo.DialWithInfo(dialInfo)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error validating connection info: %s", err)), nil
+ }
+ defer session.Close()
+ if err := session.Ping(); err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error validating connection info: %s", err)), nil
+ }
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{
+ URI: uri,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ // Reset the Session
+ b.ResetSession()
+
+ resp := &logical.Response{}
+ resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection URI as it is, including passwords, if any.")
+
+ return resp, nil
+}
+
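+// With the backend mounted at "mongodb/", the connection is typically
+// configured via, e.g. (illustrative URI):
+//
+//   vault write mongodb/config/connection uri="mongodb://admin:pass@127.0.0.1:27017/admin"
+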
+type connectionConfig struct {
+ URI string `json:"uri" structs:"uri" mapstructure:"uri"`
+}
+
+const pathConfigConnectionHelpSyn = `
+Configure the connection string to talk to MongoDB.
+`
+
+const pathConfigConnectionHelpDesc = `
+This path configures the standard connection string (URI) used to connect to MongoDB.
+
+A MongoDB URI looks like:
+"mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]"
+
+See https://docs.mongodb.org/manual/reference/connection-string/ for detailed documentation of the URI format.
+
+When configuring the connection string, the backend will verify its validity.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_config_lease.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_config_lease.go
new file mode 100644
index 0000000..8f5bd7b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_config_lease.go
@@ -0,0 +1,92 @@
+package mongodb
+
+import (
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigLease(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/lease",
+ Fields: map[string]*framework.FieldSchema{
+ "ttl": {
+ Type: framework.TypeDurationSecond,
+ Description: "Default ttl for credentials.",
+ },
+
+ "max_ttl": {
+ Type: framework.TypeDurationSecond,
+ Description: "Maximum time a set of credentials can be valid for.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathConfigLeaseRead,
+ logical.UpdateOperation: b.pathConfigLeaseWrite,
+ },
+
+ HelpSynopsis: pathConfigLeaseHelpSyn,
+ HelpDescription: pathConfigLeaseHelpDesc,
+ }
+}
+
+func (b *backend) pathConfigLeaseWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ entry, err := logical.StorageEntryJSON("config/lease", &configLease{
+ TTL: time.Second * time.Duration(d.Get("ttl").(int)),
+ MaxTTL: time.Second * time.Duration(d.Get("max_ttl").(int)),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathConfigLeaseRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ leaseConfig, err := b.LeaseConfig(req.Storage)
+
+ if err != nil {
+ return nil, err
+ }
+ if leaseConfig == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "ttl": leaseConfig.TTL.Seconds(),
+ "max_ttl": leaseConfig.MaxTTL.Seconds(),
+ },
+ }, nil
+}
+
+type configLease struct {
+ TTL time.Duration
+ MaxTTL time.Duration
+}
+
+const pathConfigLeaseHelpSyn = `
+Configure the default lease TTL settings for credentials
+generated by the mongodb backend.
+`
+
+const pathConfigLeaseHelpDesc = `
+This configures the default lease TTL settings used for
+credentials generated by this backend. The ttl specifies the
+duration that a set of credentials will be valid for before
+the lease must be renewed (if it is renewable), while the
+max_ttl specifies the overall maximum duration that the
+credentials will be valid regardless of lease renewals.
+
+The format for the TTL values is an integer followed by a unit. For
+example, the value "1h" specifies a 1-hour TTL. The longest
+supported unit is hours.
+`
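Another sketch (same assumptions as the connection example above): the lease endpoint accepts duration strings on write, while the read handler reports the stored values in seconds.

    package example

    import (
        "fmt"

        vaultapi "github.com/hashicorp/vault/api"
    )

    // configureLease writes the lease TTLs and reads them back.
    // TypeDurationSecond fields accept strings such as "1h" as well as
    // plain integer seconds.
    func configureLease(client *vaultapi.Client) error {
        _, err := client.Logical().Write("mongodb/config/lease", map[string]interface{}{
            "ttl":     "1h",
            "max_ttl": "24h",
        })
        if err != nil {
            return err
        }

        secret, err := client.Logical().Read("mongodb/config/lease")
        if err != nil {
            return err
        }
        // pathConfigLeaseRead reports seconds, e.g. ttl=3600, max_ttl=86400.
        fmt.Println(secret.Data["ttl"], secret.Data["max_ttl"])
        return nil
    }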
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_creds_create.go
new file mode 100644
index 0000000..de80e0a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_creds_create.go
@@ -0,0 +1,116 @@
+package mongodb
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathCredsCreate(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "creds/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": {
+ Type: framework.TypeString,
+ Description: "Name of the role to generate credentials for.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathCredsCreateRead,
+ },
+
+ HelpSynopsis: pathCredsCreateReadHelpSyn,
+ HelpDescription: pathCredsCreateReadHelpDesc,
+ }
+}
+
+func (b *backend) pathCredsCreateRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+
+ // Get the role
+ role, err := b.Role(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
+ }
+
+ // Determine if we have a lease configuration
+ leaseConfig, err := b.LeaseConfig(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if leaseConfig == nil {
+ leaseConfig = &configLease{}
+ }
+
+ // Generate the username and password
+ displayName := req.DisplayName
+ if displayName != "" {
+ displayName += "-"
+ }
+
+ userUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+
+ username := fmt.Sprintf("vault-%s%s", displayName, userUUID)
+
+ password, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+
+ // Build the user creation command
+ createUserCmd := createUserCommand{
+ Username: username,
+ Password: password,
+ Roles: role.MongoDBRoles.toStandardRolesArray(),
+ }
+
+ // Get our connection
+ session, err := b.Session(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create the user
+ err = session.DB(role.DB).Run(createUserCmd, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the secret
+ resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
+ "db": role.DB,
+ "username": username,
+ "password": password,
+ }, map[string]interface{}{
+ "username": username,
+ "db": role.DB,
+ })
+ resp.Secret.TTL = leaseConfig.TTL
+ return resp, nil
+}
+
+type createUserCommand struct {
+ Username string `bson:"createUser"`
+ Password string `bson:"pwd"`
+ Roles []interface{} `bson:"roles"`
+}
+
+const pathCredsCreateReadHelpSyn = `
+Request MongoDB database credentials for a particular role.
+`
+
+const pathCredsCreateReadHelpDesc = `
+This path generates MongoDB database credentials for
+a particular role. The database credentials will be
+generated on demand and will be automatically revoked when
+the lease is up.
+`
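A read sketch to go with the handler above (the role name "readonly" is hypothetical; the mount is again assumed to be "mongodb/"):

    package example

    import (
        "fmt"

        vaultapi "github.com/hashicorp/vault/api"
    )

    // readCreds requests a dynamic MongoDB user for the given role.
    func readCreds(client *vaultapi.Client, role string) error {
        secret, err := client.Logical().Read("mongodb/creds/" + role)
        if err != nil {
            return err
        }
        // The username follows the pattern "vault-<display name>-<uuid>" and
        // the password is a UUID; both are revoked when the lease expires.
        fmt.Println(secret.Data["db"], secret.Data["username"], secret.Data["password"])
        return nil
    }

Usage would then be readCreds(client, "readonly").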
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_roles.go
new file mode 100644
index 0000000..d67e49c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_roles.go
@@ -0,0 +1,228 @@
+package mongodb
+
+import (
+ "encoding/json"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathListRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList,
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func pathRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": {
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "db": {
+ Type: framework.TypeString,
+ Description: "Name of the authentication database for users generated for this role.",
+ },
+ "roles": {
+ Type: framework.TypeString,
+ Description: "MongoDB roles to assign to the users generated for this role.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRoleRead,
+ logical.UpdateOperation: b.pathRoleCreate,
+ logical.DeleteOperation: b.pathRoleDelete,
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func (b *backend) Role(s logical.Storage, n string) (*roleStorageEntry, error) {
+ entry, err := s.Get("role/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result roleStorageEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathRoleDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("role/" + data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathRoleRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ role, err := b.Role(req.Storage, data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ rolesJsonBytes, err := json.Marshal(role.MongoDBRoles.toStandardRolesArray())
+ if err != nil {
+ return nil, err
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "db": role.DB,
+ "roles": string(rolesJsonBytes),
+ },
+ }, nil
+}
+
+func (b *backend) pathRoleList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entries, err := req.Storage.List("role/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(entries), nil
+}
+
+func (b *backend) pathRoleCreate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+
+ name := data.Get("name").(string)
+ if name == "" {
+ return logical.ErrorResponse("Missing name"), nil
+ }
+
+ roleDB := data.Get("db").(string)
+ if roleDB == "" {
+ return logical.ErrorResponse("db parameter is required"), nil
+ }
+
+ // Example roles JSON:
+ //
+ // [ "readWrite", { "role": "readWrite", "db": "test" } ]
+ //
+ // For storage, we convert such an array into a homogeneous array of role documents like:
+ //
+ // [ { "role": "readWrite" }, { "role": "readWrite", "db": "test" } ]
+ //
+ var roles []mongodbRole
+ rolesJson := []byte(data.Get("roles").(string))
+ if len(rolesJson) > 0 {
+ var rolesArray []interface{}
+ err := json.Unmarshal(rolesJson, &rolesArray)
+ if err != nil {
+ return nil, err
+ }
+ for _, rawRole := range rolesArray {
+ switch role := rawRole.(type) {
+ case string:
+ roles = append(roles, mongodbRole{Role: role})
+ case map[string]interface{}:
+ if db, ok := role["db"].(string); ok {
+ if roleName, ok := role["role"].(string); ok {
+ roles = append(roles, mongodbRole{Role: roleName, DB: db})
+ }
+ }
+ }
+ }
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("role/"+name, &roleStorageEntry{
+ DB: roleDB,
+ MongoDBRoles: roles,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (roles mongodbRoles) toStandardRolesArray() []interface{} {
+ // Convert array of role documents like:
+ //
+ // [ { "role": "readWrite" }, { "role": "readWrite", "db": "test" } ]
+ //
+ // into a "standard" MongoDB roles array containing both strings and role documents:
+ //
+ // [ "readWrite", { "role": "readWrite", "db": "test" } ]
+ //
+ // MongoDB's createUser command accepts the latter.
+ //
+ var standardRolesArray []interface{}
+ for _, role := range roles {
+ if role.DB == "" {
+ standardRolesArray = append(standardRolesArray, role.Role)
+ } else {
+ standardRolesArray = append(standardRolesArray, role)
+ }
+ }
+ return standardRolesArray
+}
+
+type roleStorageEntry struct {
+ DB string `json:"db"`
+ MongoDBRoles mongodbRoles `json:"roles"`
+}
+
+type mongodbRole struct {
+ Role string `json:"role" bson:"role"`
+ DB string `json:"db" bson:"db"`
+}
+
+type mongodbRoles []mongodbRole
+
+const pathRoleHelpSyn = `
+Manage the roles used to generate MongoDB credentials.
+`
+
+const pathRoleHelpDesc = `
+This path lets you manage the roles used to generate MongoDB credentials.
+
+The "db" parameter specifies the authentication database for users
+generated for a given role.
+
+The "roles" parameter specifies the MongoDB roles that should be assigned
+to users created for a given role. Just like when creating a user directly
+using db.createUser, the roles JSON array can specify both built-in roles
+and user-defined roles for both the database the user is created in and
+for other databases.
+
+For example, the following roles JSON array grants the "readWrite"
+permission on both the user's authentication database and the "test"
+database:
+
+[ "readWrite", { "role": "readWrite", "db": "test" } ]
+
+Please consult the MongoDB documentation for more
+details on Role-Based Access Control in MongoDB.
+`
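A sketch of writing such a role (mount assumed at "mongodb/"; the role name "readwrite" is hypothetical). The "roles" parameter is passed as a JSON string and normalized by pathRoleCreate into the homogeneous document form shown above:

    package example

    import vaultapi "github.com/hashicorp/vault/api"

    // writeRole stores a role whose users get readWrite on their own
    // authentication database and on "test".
    func writeRole(client *vaultapi.Client) error {
        _, err := client.Logical().Write("mongodb/roles/readwrite", map[string]interface{}{
            "db":    "admin",
            "roles": `[ "readWrite", { "role": "readWrite", "db": "test" } ]`,
        })
        return err
    }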
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/secret_creds.go
new file mode 100644
index 0000000..6131910
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/secret_creds.go
@@ -0,0 +1,81 @@
+package mongodb
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "gopkg.in/mgo.v2"
+)
+
+const SecretCredsType = "creds"
+
+func secretCreds(b *backend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretCredsType,
+ Fields: map[string]*framework.FieldSchema{
+ "username": {
+ Type: framework.TypeString,
+ Description: "Username",
+ },
+
+ "password": {
+ Type: framework.TypeString,
+ Description: "Password",
+ },
+ },
+
+ Renew: b.secretCredsRenew,
+ Revoke: b.secretCredsRevoke,
+ }
+}
+
+func (b *backend) secretCredsRenew(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Get the lease information
+ leaseConfig, err := b.LeaseConfig(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if leaseConfig == nil {
+ leaseConfig = &configLease{}
+ }
+
+ f := framework.LeaseExtend(leaseConfig.TTL, leaseConfig.MaxTTL, b.System())
+ return f(req, d)
+}
+
+func (b *backend) secretCredsRevoke(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Get the username from the internal data
+ usernameRaw, ok := req.Secret.InternalData["username"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing username internal data")
+ }
+ username, ok := usernameRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("username internal data is not a string")
+ }
+
+ // Get the db from the internal data
+ dbRaw, ok := req.Secret.InternalData["db"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing db internal data")
+ }
+ db, ok := dbRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("db internal data is not a string")
+ }
+
+ // Get our connection
+ session, err := b.Session(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Drop the user
+ err = session.DB(db).RemoveUser(username)
+ if err != nil && err != mgo.ErrNotFound {
+ return nil, err
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/util.go
new file mode 100644
index 0000000..209feb0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/util.go
@@ -0,0 +1,81 @@
+package mongodb
+
+import (
+ "crypto/tls"
+ "errors"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "gopkg.in/mgo.v2"
+)
+
+// Unfortunately, mgo doesn't support the ssl parameter in its MongoDB URI parsing logic, so we have to handle that
+// ourselves. See https://github.com/go-mgo/mgo/issues/84
+func parseMongoURI(rawUri string) (*mgo.DialInfo, error) {
+ uri, err := url.Parse(rawUri)
+ if err != nil {
+ return nil, err
+ }
+
+ info := mgo.DialInfo{
+ Addrs: strings.Split(uri.Host, ","),
+ Database: strings.TrimPrefix(uri.Path, "/"),
+ Timeout: 10 * time.Second,
+ }
+
+ if uri.User != nil {
+ info.Username = uri.User.Username()
+ info.Password, _ = uri.User.Password()
+ }
+
+ query := uri.Query()
+ for key, values := range query {
+ var value string
+ if len(values) > 0 {
+ value = values[0]
+ }
+
+ switch key {
+ case "authSource":
+ info.Source = value
+ case "authMechanism":
+ info.Mechanism = value
+ case "gssapiServiceName":
+ info.Service = value
+ case "replicaSet":
+ info.ReplicaSetName = value
+ case "maxPoolSize":
+ poolLimit, err := strconv.Atoi(value)
+ if err != nil {
+ return nil, errors.New("bad value for maxPoolSize: " + value)
+ }
+ info.PoolLimit = poolLimit
+ case "ssl":
+ ssl, err := strconv.ParseBool(value)
+ if err != nil {
+ return nil, errors.New("bad value for ssl: " + value)
+ }
+ if ssl {
+ info.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
+ return tls.Dial("tcp", addr.String(), &tls.Config{})
+ }
+ }
+ case "connect":
+ if value == "direct" {
+ info.Direct = true
+ break
+ }
+ if value == "replicaSet" {
+ break
+ }
+ fallthrough
+ default:
+ return nil, errors.New("unsupported connection URL option: " + key + "=" + value)
+ }
+ }
+
+ return &info, nil
+}
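Since parseMongoURI is unexported, an in-package test is the natural way to exercise the ssl handling above. A minimal sketch (not part of this change):

    package mongodb

    import "testing"

    func TestParseMongoURI_Options(t *testing.T) {
        info, err := parseMongoURI("mongodb://user:pass@db1:27017,db2:27017/admin?ssl=true&replicaSet=rs0")
        if err != nil {
            t.Fatal(err)
        }
        if len(info.Addrs) != 2 || info.Database != "admin" {
            t.Fatalf("bad dial info: %#v", info)
        }
        // ssl=true installs a TLS DialServer, which mgo's own parser lacks.
        if info.DialServer == nil {
            t.Fatal("expected a TLS dialer for ssl=true")
        }
        if info.ReplicaSetName != "rs0" {
            t.Fatalf("unexpected replica set: %q", info.ReplicaSetName)
        }
    }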
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go
new file mode 100644
index 0000000..61afe75
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go
@@ -0,0 +1,149 @@
+package mssql
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+ "sync"
+
+ _ "github.com/denisenkom/go-mssqldb"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(backendHelp),
+
+ Paths: []*framework.Path{
+ pathConfigConnection(&b),
+ pathConfigLease(&b),
+ pathListRoles(&b),
+ pathRoles(&b),
+ pathCredsCreate(&b),
+ },
+
+ Secrets: []*framework.Secret{
+ secretCreds(&b),
+ },
+
+ Invalidate: b.invalidate,
+
+ Clean: b.ResetDB,
+ }
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+
+ db *sql.DB
+ defaultDb string
+ lock sync.Mutex
+}
+
+// DB returns the default database connection.
+func (b *backend) DB(s logical.Storage) (*sql.DB, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ // If we already have a DB, we got it!
+ if b.db != nil {
+ if err := b.db.Ping(); err == nil {
+ return b.db, nil
+ }
+		// If the ping was unsuccessful, close it and ignore errors as we'll be
+		// re-establishing it anyway
+ b.db.Close()
+ }
+
+ // Otherwise, attempt to make connection
+ entry, err := s.Get("config/connection")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, fmt.Errorf("configure the DB connection with config/connection first")
+ }
+
+ var connConfig connectionConfig
+ if err := entry.DecodeJSON(&connConfig); err != nil {
+ return nil, err
+ }
+ connString := connConfig.ConnectionString
+
+ db, err := sql.Open("sqlserver", connString)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set some connection pool settings. We don't need much of this,
+ // since the request rate shouldn't be high.
+ db.SetMaxOpenConns(connConfig.MaxOpenConnections)
+
+ stmt, err := db.Prepare("SELECT db_name();")
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+
+ err = stmt.QueryRow().Scan(&b.defaultDb)
+ if err != nil {
+ return nil, err
+ }
+
+ b.db = db
+ return b.db, nil
+}
+
+// ResetDB forces a connection next time DB() is called.
+func (b *backend) ResetDB() {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.db != nil {
+ b.db.Close()
+ }
+
+ b.db = nil
+}
+
+func (b *backend) invalidate(key string) {
+ switch key {
+ case "config/connection":
+ b.ResetDB()
+ }
+}
+
+// LeaseConfig returns the lease configuration
+func (b *backend) LeaseConfig(s logical.Storage) (*configLease, error) {
+ entry, err := s.Get("config/lease")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result configLease
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+const backendHelp = `
+The MSSQL backend dynamically generates database users.
+
+After mounting this backend, configure it using the endpoints within
+the "config/" path.
+
+This backend does not support Azure SQL Databases.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend_test.go
new file mode 100644
index 0000000..329aeac
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend_test.go
@@ -0,0 +1,212 @@
+package mssql
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/mitchellh/mapstructure"
+)
+
+func TestBackend_config_connection(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ configData := map[string]interface{}{
+ "connection_string": "sample_connection_string",
+ "max_open_connections": 7,
+ "verify_connection": false,
+ }
+
+ configReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Storage: config.StorageView,
+ Data: configData,
+ }
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ configReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ delete(configData, "verify_connection")
+ if !reflect.DeepEqual(configData, resp.Data) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data)
+ }
+}
+
+func TestBackend_basic(t *testing.T) {
+ b, _ := Factory(logical.TestBackendConfig())
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ PreCheck: func() { testAccPreCheck(t) },
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t),
+ testAccStepRole(t),
+ testAccStepReadCreds(t, "web"),
+ },
+ })
+}
+
+func TestBackend_roleCrud(t *testing.T) {
+ b := Backend()
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ PreCheck: func() { testAccPreCheck(t) },
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t),
+ testAccStepRole(t),
+ testAccStepReadRole(t, "web", testRoleSQL),
+ testAccStepDeleteRole(t, "web"),
+ testAccStepReadRole(t, "web", ""),
+ },
+ })
+}
+
+func TestBackend_leaseWriteRead(t *testing.T) {
+ b := Backend()
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ PreCheck: func() { testAccPreCheck(t) },
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t),
+ testAccStepWriteLease(t),
+ testAccStepReadLease(t),
+ },
+ })
+
+}
+
+func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("MSSQL_DSN"); v == "" {
+ t.Fatal("MSSQL_DSN must be set for acceptance tests")
+ }
+}
+
+func testAccStepConfig(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Data: map[string]interface{}{
+ "connection_string": os.Getenv("MSSQL_DSN"),
+ },
+ }
+}
+
+func testAccStepRole(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/web",
+ Data: map[string]interface{}{
+ "sql": testRoleSQL,
+ },
+ }
+}
+
+func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "roles/" + n,
+ }
+}
+
+func testAccStepReadCreds(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "creds/" + name,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Username string `mapstructure:"username"`
+ Password string `mapstructure:"password"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ log.Printf("[WARN] Generated credentials: %v", d)
+
+ return nil
+ },
+ }
+}
+
+func testAccStepReadRole(t *testing.T, name, sql string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "roles/" + name,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if sql == "" {
+ return nil
+ }
+
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var d struct {
+ SQL string `mapstructure:"sql"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.SQL != sql {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepWriteLease(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/lease",
+ Data: map[string]interface{}{
+ "ttl": "1h5m",
+ "max_ttl": "24h",
+ },
+ }
+}
+
+func testAccStepReadLease(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "config/lease",
+ Check: func(resp *logical.Response) error {
+ if resp.Data["ttl"] != "1h5m0s" || resp.Data["max_ttl"] != "24h0m0s" {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ return nil
+ },
+ }
+}
+
+const testRoleSQL = `
+CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}';
+CREATE USER [{{name}}] FOR LOGIN [{{name}}];
+GRANT SELECT ON SCHEMA::dbo TO [{{name}}]
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_config_connection.go
new file mode 100644
index 0000000..5caed56
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_config_connection.go
@@ -0,0 +1,124 @@
+package mssql
+
+import (
+ "database/sql"
+ "fmt"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigConnection(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/connection",
+ Fields: map[string]*framework.FieldSchema{
+ "connection_string": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "DB connection parameters",
+ },
+ "max_open_connections": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: "Maximum number of open connections to database",
+ },
+ "verify_connection": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: "If set, connection_string is verified by actually connecting to the database",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathConnectionWrite,
+ logical.ReadOperation: b.pathConnectionRead,
+ },
+
+ HelpSynopsis: pathConfigConnectionHelpSyn,
+ HelpDescription: pathConfigConnectionHelpDesc,
+ }
+}
+
+// pathConnectionRead reads out the connection configuration
+func (b *backend) pathConnectionRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ entry, err := req.Storage.Get("config/connection")
+ if err != nil {
+ return nil, fmt.Errorf("failed to read connection configuration")
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var config connectionConfig
+ if err := entry.DecodeJSON(&config); err != nil {
+ return nil, err
+ }
+ return &logical.Response{
+ Data: structs.New(config).Map(),
+ }, nil
+}
+
+// pathConnectionWrite stores the connection configuration
+func (b *backend) pathConnectionWrite(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ connString := data.Get("connection_string").(string)
+
+ maxOpenConns := data.Get("max_open_connections").(int)
+ if maxOpenConns == 0 {
+ maxOpenConns = 2
+ }
+
+ // Don't check the connection_string if verification is disabled
+ verifyConnection := data.Get("verify_connection").(bool)
+ if verifyConnection {
+ // Verify the string
+ db, err := sql.Open("mssql", connString)
+
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error validating connection info: %s", err)), nil
+ }
+ defer db.Close()
+ if err := db.Ping(); err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error validating connection info: %s", err)), nil
+ }
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{
+ ConnectionString: connString,
+ MaxOpenConnections: maxOpenConns,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ // Reset the DB connection
+ b.ResetDB()
+
+ resp := &logical.Response{}
+ resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection string as it is, including passwords, if any.")
+
+ return resp, nil
+}
+
+type connectionConfig struct {
+ ConnectionString string `json:"connection_string" structs:"connection_string" mapstructure:"connection_string"`
+ MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"`
+}
+
+const pathConfigConnectionHelpSyn = `
+Configure the connection string to talk to Microsoft SQL Server.
+`
+
+const pathConfigConnectionHelpDesc = `
+This path configures the connection string used to connect to SQL Server.
+The value of the string is a Data Source Name (DSN). For example:
+"server=;port=;user id=;password=;database=;app name=vault;"
+
+When configuring the connection string, the backend will verify its validity.
+If the database is not available when setting the connection string, set the
+"verify_connection" option to false.
+`
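A configuration sketch with a filled-in DSN (all values are placeholders; mount assumed at "mssql/"):

    package example

    import vaultapi "github.com/hashicorp/vault/api"

    // configureMSSQL writes a sample DSN. Setting verify_connection to false
    // skips the connectivity check, as the help text above suggests for a
    // database that is not yet reachable.
    func configureMSSQL(client *vaultapi.Client) error {
        _, err := client.Logical().Write("mssql/config/connection", map[string]interface{}{
            "connection_string":    "server=127.0.0.1;port=1433;user id=sa;password=example;database=master;app name=vault;",
            "max_open_connections": 4,
            "verify_connection":    false,
        })
        return err
    }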
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_config_lease.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_config_lease.go
new file mode 100644
index 0000000..f4ab92e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_config_lease.go
@@ -0,0 +1,116 @@
+package mssql
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigLease(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/lease",
+ Fields: map[string]*framework.FieldSchema{
+ "ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Default ttl for roles.",
+ },
+
+ "ttl_max": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Deprecated: use "max_ttl" instead. Maximum
+time a credential is valid for.`,
+ },
+
+ "max_ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Maximum time a credential is valid for.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathConfigLeaseRead,
+ logical.UpdateOperation: b.pathConfigLeaseWrite,
+ },
+
+ HelpSynopsis: pathConfigLeaseHelpSyn,
+ HelpDescription: pathConfigLeaseHelpDesc,
+ }
+}
+
+func (b *backend) pathConfigLeaseWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ ttlRaw := d.Get("ttl").(string)
+ ttlMaxRaw := d.Get("max_ttl").(string)
+ if len(ttlMaxRaw) == 0 {
+ ttlMaxRaw = d.Get("ttl_max").(string)
+ }
+
+ ttl, err := time.ParseDuration(ttlRaw)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Invalid ttl: %s", err)), nil
+ }
+ ttlMax, err := time.ParseDuration(ttlMaxRaw)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Invalid max_ttl: %s", err)), nil
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("config/lease", &configLease{
+ TTL: ttl,
+ TTLMax: ttlMax,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathConfigLeaseRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ leaseConfig, err := b.LeaseConfig(req.Storage)
+
+ if err != nil {
+ return nil, err
+ }
+ if leaseConfig == nil {
+ return nil, nil
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "ttl": leaseConfig.TTL.String(),
+ "ttl_max": leaseConfig.TTLMax.String(),
+ "max_ttl": leaseConfig.TTLMax.String(),
+ },
+ }
+ resp.AddWarning("The field ttl_max is deprecated and will be removed in a future release. Use max_ttl instead.")
+
+ return resp, nil
+}
+
+type configLease struct {
+ TTL time.Duration
+ TTLMax time.Duration
+}
+
+const pathConfigLeaseHelpSyn = `
+Configure the default lease TTL for generated credentials.
+`
+
+const pathConfigLeaseHelpDesc = `
+This configures the default lease TTL used for credentials
+generated by this backend. The ttl specifies the duration a
+credential will be valid for, while max_ttl (or the deprecated
+ttl_max) bounds the overall lifetime of a set of credentials.
+
+The format for the TTL values is an integer followed by a unit,
+for example "1h". The longest supported unit is hours.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_creds_create.go
new file mode 100644
index 0000000..f9baae7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_creds_create.go
@@ -0,0 +1,130 @@
+package mssql
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathCredsCreate(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "creds/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathCredsCreateRead,
+ },
+
+ HelpSynopsis: pathCredsCreateHelpSyn,
+ HelpDescription: pathCredsCreateHelpDesc,
+ }
+}
+
+func (b *backend) pathCredsCreateRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+
+ // Get the role
+ role, err := b.Role(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
+ }
+
+ // Determine if we have a lease configuration
+ leaseConfig, err := b.LeaseConfig(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if leaseConfig == nil {
+ leaseConfig = &configLease{}
+ }
+
+ // Generate our username and password
+ displayName := req.DisplayName
+ if len(displayName) > 10 {
+ displayName = displayName[:10]
+ }
+ userUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+ username := fmt.Sprintf("%s-%s", displayName, userUUID)
+ password, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get our handle
+ db, err := b.DB(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Start a transaction
+ tx, err := db.Begin()
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+
+ // Always reset database to default db of connection. Since it is in a
+ // transaction, all statements will be on the same connection in the pool.
+ roleSQL := fmt.Sprintf("USE [%s]; %s", b.defaultDb, role.SQL)
+
+ // Execute each query
+ for _, query := range strutil.ParseArbitraryStringSlice(roleSQL, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := tx.Prepare(Query(query, map[string]string{
+ "name": username,
+ "password": password,
+ }))
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return nil, err
+ }
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(); err != nil {
+ return nil, err
+ }
+
+ // Return the secret
+ resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
+ "username": username,
+ "password": password,
+ }, map[string]interface{}{
+ "username": username,
+ })
+ resp.Secret.TTL = leaseConfig.TTL
+ return resp, nil
+}
+
+const pathCredsCreateHelpSyn = `
+Request database credentials for a certain role.
+`
+
+const pathCredsCreateHelpDesc = `
+This path reads database credentials for a certain role. The
+database credentials will be generated on demand and will be automatically
+revoked when the lease is up.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_roles.go
new file mode 100644
index 0000000..4229444
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_roles.go
@@ -0,0 +1,175 @@
+package mssql
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathListRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList,
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func pathRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+
+ "sql": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "SQL string to create a role. See help for more info.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRoleRead,
+ logical.UpdateOperation: b.pathRoleCreate,
+ logical.DeleteOperation: b.pathRoleDelete,
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
+ entry, err := s.Get("role/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result roleEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathRoleDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("role/" + data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathRoleRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ role, err := b.Role(req.Storage, data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "sql": role.SQL,
+ },
+ }, nil
+}
+
+func (b *backend) pathRoleList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entries, err := req.Storage.List("role/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(entries), nil
+}
+
+func (b *backend) pathRoleCreate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+ sql := data.Get("sql").(string)
+
+ // Get our connection
+ db, err := b.DB(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Test the query by trying to prepare it
+ for _, query := range strutil.ParseArbitraryStringSlice(sql, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := db.Prepare(Query(query, map[string]string{
+ "name": "foo",
+ "password": "bar",
+ }))
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error testing query: %s", err)), nil
+ }
+ stmt.Close()
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
+ SQL: sql,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+type roleEntry struct {
+ SQL string `json:"sql"`
+}
+
+const pathRoleHelpSyn = `
+Manage the roles that can be created with this backend.
+`
+
+const pathRoleHelpDesc = `
+This path lets you manage the roles that can be created with this backend.
+
+The "sql" parameter customizes the SQL string used to create the login to
+the server. The parameter can be a sequence of SQL queries separated by
+semicolons. Some substitution will be done to the SQL string for certain keys.
+The names of the variables must be surrounded by "{{" and "}}" to be replaced.
+
+ * "name" - The random username generated for the DB user.
+
+ * "password" - The random password generated for the DB user.
+
+Example SQL query to use:
+
+ CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}';
+ CREATE USER [{{name}}] FROM LOGIN [{{name}}];
+ GRANT SELECT, UPDATE, DELETE, INSERT on SCHEMA::dbo TO [{{name}}];
+
+Please see the Microsoft SQL Server manual on the GRANT command to learn how
+to configure more fine-grained access.
+`
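A sketch of storing such a role (mount assumed at "mssql/"). Note from pathRoleCreate above that each statement is prepared against the live database with placeholder values, so an invalid script fails at write time rather than at credential issuance:

    package example

    import vaultapi "github.com/hashicorp/vault/api"

    // writeMSSQLRole stores a templated creation script under roles/web.
    func writeMSSQLRole(client *vaultapi.Client) error {
        sql := `
    CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}';
    CREATE USER [{{name}}] FOR LOGIN [{{name}}];
    GRANT SELECT ON SCHEMA::dbo TO [{{name}}];`

        _, err := client.Logical().Write("mssql/roles/web", map[string]interface{}{
            "sql": sql,
        })
        return err
    }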
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/secret_creds.go
new file mode 100644
index 0000000..b870c59
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/secret_creds.go
@@ -0,0 +1,186 @@
+package mssql
+
+import (
+ "database/sql"
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const SecretCredsType = "creds"
+
+func secretCreds(b *backend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretCredsType,
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username",
+ },
+
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Password",
+ },
+ },
+
+ Renew: b.secretCredsRenew,
+ Revoke: b.secretCredsRevoke,
+ }
+}
+
+func (b *backend) secretCredsRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Get the lease information
+ leaseConfig, err := b.LeaseConfig(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if leaseConfig == nil {
+ leaseConfig = &configLease{}
+ }
+
+ f := framework.LeaseExtend(leaseConfig.TTL, leaseConfig.TTLMax, b.System())
+ return f(req, d)
+}
+
+func (b *backend) secretCredsRevoke(
+	req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	// Get the username from the internal data
+	usernameRaw, ok := req.Secret.InternalData["username"]
+	if !ok {
+		return nil, fmt.Errorf("secret is missing username internal data")
+	}
+	username, ok := usernameRaw.(string)
+	if !ok {
+		return nil, fmt.Errorf("username internal data is not a string")
+	}
+
+ // Get our connection
+ db, err := b.DB(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ // First disable server login
+ disableStmt, err := db.Prepare(fmt.Sprintf("ALTER LOGIN [%s] DISABLE;", username))
+ if err != nil {
+ return nil, err
+ }
+ defer disableStmt.Close()
+ if _, err := disableStmt.Exec(); err != nil {
+ return nil, err
+ }
+
+ // Query for sessions for the login so that we can kill any outstanding
+ // sessions. There cannot be any active sessions before we drop the logins
+ // This isn't done in a transaction because even if we fail along the way,
+ // we want to remove as much access as possible
+ sessionStmt, err := db.Prepare(fmt.Sprintf(
+ "SELECT session_id FROM sys.dm_exec_sessions WHERE login_name = '%s';", username))
+ if err != nil {
+ return nil, err
+ }
+ defer sessionStmt.Close()
+
+ sessionRows, err := sessionStmt.Query()
+ if err != nil {
+ return nil, err
+ }
+ defer sessionRows.Close()
+
+ var revokeStmts []string
+ for sessionRows.Next() {
+ var sessionID int
+ err = sessionRows.Scan(&sessionID)
+ if err != nil {
+ return nil, err
+ }
+ revokeStmts = append(revokeStmts, fmt.Sprintf("KILL %d;", sessionID))
+ }
+
+ // Query for database users using undocumented stored procedure for now since
+ // it is the easiest way to get this information;
+ // we need to drop the database users before we can drop the login and the role
+ // This isn't done in a transaction because even if we fail along the way,
+ // we want to remove as much access as possible
+ stmt, err := db.Prepare(fmt.Sprintf("EXEC master.dbo.sp_msloginmappings '%s';", username))
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+
+ rows, err := stmt.Query()
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var loginName, dbName, qUsername string
+ var aliasName sql.NullString
+ err = rows.Scan(&loginName, &dbName, &qUsername, &aliasName)
+ if err != nil {
+ return nil, err
+ }
+ revokeStmts = append(revokeStmts, fmt.Sprintf(dropUserSQL, dbName, username, username))
+ }
+
+ // we do not stop on error, as we want to remove as
+ // many permissions as possible right now
+ var lastStmtError error
+ for _, query := range revokeStmts {
+ stmt, err := db.Prepare(query)
+ if err != nil {
+ lastStmtError = err
+ continue
+ }
+ defer stmt.Close()
+ _, err = stmt.Exec()
+ if err != nil {
+ lastStmtError = err
+ }
+ }
+
+ // can't drop if not all database users are dropped
+ if rows.Err() != nil {
+ return nil, fmt.Errorf("cound not generate sql statements for all rows: %s", rows.Err())
+ }
+ if lastStmtError != nil {
+ return nil, fmt.Errorf("could not perform all sql statements: %s", lastStmtError)
+ }
+
+ // Drop this login
+ stmt, err = db.Prepare(fmt.Sprintf(dropLoginSQL, username, username))
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+const dropUserSQL = `
+USE [%s]
+IF EXISTS
+ (SELECT name
+ FROM sys.database_principals
+ WHERE name = N'%s')
+BEGIN
+ DROP USER [%s]
+END
+`
+
+const dropLoginSQL = `
+IF EXISTS
+ (SELECT name
+ FROM master.sys.server_principals
+ WHERE name = N'%s')
+BEGIN
+ DROP LOGIN [%s]
+END
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/util.go
new file mode 100644
index 0000000..362cbd3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/util.go
@@ -0,0 +1,28 @@
+package mssql
+
+import (
+ "fmt"
+ "strings"
+)
+
+// SplitSQL is used to split a series of SQL statements
+func SplitSQL(sql string) []string {
+ parts := strings.Split(sql, ";")
+ out := make([]string, 0, len(parts))
+ for _, p := range parts {
+ clean := strings.TrimSpace(p)
+ if len(clean) > 0 {
+ out = append(out, clean)
+ }
+ }
+ return out
+}
+
+// Query renders a query template, replacing each {{key}} placeholder with its value.
+func Query(tpl string, data map[string]string) string {
+ for k, v := range data {
+ tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
+ }
+
+ return tpl
+}
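An in-package sketch of the two helpers above, including the "{{name}}"/"{{password}}" substitution that the roles help describes:

    package mssql

    import (
        "reflect"
        "testing"
    )

    func TestQueryAndSplitSQL(t *testing.T) {
        rendered := Query("CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}';", map[string]string{
            "name":     "vault-user",
            "password": "s3cret",
        })
        if rendered != "CREATE LOGIN [vault-user] WITH PASSWORD = 's3cret';" {
            t.Fatalf("unexpected rendering: %s", rendered)
        }

        // SplitSQL drops empty segments and trims whitespace.
        got := SplitSQL("USE [master]; SELECT 1; ;")
        want := []string{"USE [master]", "SELECT 1"}
        if !reflect.DeepEqual(got, want) {
            t.Fatalf("got %#v, want %#v", got, want)
        }
    }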
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go
new file mode 100644
index 0000000..7ae0335
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go
@@ -0,0 +1,140 @@
+package mysql
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+ "sync"
+
+ _ "github.com/go-sql-driver/mysql"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(backendHelp),
+
+ Paths: []*framework.Path{
+ pathConfigConnection(&b),
+ pathConfigLease(&b),
+ pathListRoles(&b),
+ pathRoles(&b),
+ pathRoleCreate(&b),
+ },
+
+ Secrets: []*framework.Secret{
+ secretCreds(&b),
+ },
+
+ Invalidate: b.invalidate,
+
+ Clean: b.ResetDB,
+ }
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+
+ db *sql.DB
+ lock sync.Mutex
+}
+
+// DB returns the database connection.
+func (b *backend) DB(s logical.Storage) (*sql.DB, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ // If we already have a DB, we got it!
+ if b.db != nil {
+ if err := b.db.Ping(); err == nil {
+ return b.db, nil
+ }
+		// If the ping was unsuccessful, close it and ignore errors as we'll be
+		// re-establishing it anyway
+ b.db.Close()
+ }
+
+ // Otherwise, attempt to make connection
+ entry, err := s.Get("config/connection")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil,
+ fmt.Errorf("configure the DB connection with config/connection first")
+ }
+
+ var connConfig connectionConfig
+ if err := entry.DecodeJSON(&connConfig); err != nil {
+ return nil, err
+ }
+
+ conn := connConfig.ConnectionURL
+ if len(conn) == 0 {
+ conn = connConfig.ConnectionString
+ }
+
+ b.db, err = sql.Open("mysql", conn)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set some connection pool settings. We don't need much of this,
+ // since the request rate shouldn't be high.
+ b.db.SetMaxOpenConns(connConfig.MaxOpenConnections)
+ b.db.SetMaxIdleConns(connConfig.MaxIdleConnections)
+
+ return b.db, nil
+}
+
+// ResetDB forces a connection next time DB() is called.
+func (b *backend) ResetDB() {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.db != nil {
+ b.db.Close()
+ }
+
+ b.db = nil
+}
+
+func (b *backend) invalidate(key string) {
+ switch key {
+ case "config/connection":
+ b.ResetDB()
+ }
+}
+
+// Lease returns the lease information
+func (b *backend) Lease(s logical.Storage) (*configLease, error) {
+ entry, err := s.Get("config/lease")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result configLease
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+const backendHelp = `
+The MySQL backend dynamically generates database users.
+
+After mounting this backend, configure it using the endpoints within
+the "config/" path.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend_test.go
new file mode 100644
index 0000000..2a0a1fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend_test.go
@@ -0,0 +1,368 @@
+package mysql
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/mitchellh/mapstructure"
+ dockertest "gopkg.in/ory-am/dockertest.v2"
+)
+
+var (
+ testImagePull sync.Once
+)
+
+func prepareTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cid dockertest.ContainerID, retURL string) {
+ if os.Getenv("MYSQL_DSN") != "" {
+ return "", os.Getenv("MYSQL_DSN")
+ }
+
+ // Without this the checks for whether the container has started seem to
+ // never actually pass. There's really no reason to expose the test
+ // containers, so don't.
+ dockertest.BindDockerToLocalhost = "yep"
+
+ testImagePull.Do(func() {
+ dockertest.Pull("mysql")
+ })
+
+ cid, connErr := dockertest.ConnectToMySQL(60, 500*time.Millisecond, func(connURL string) bool {
+ // This will cause a validation to run
+ resp, err := b.HandleRequest(&logical.Request{
+ Storage: s,
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Data: map[string]interface{}{
+ "connection_url": connURL,
+ },
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ // It's likely not up and running yet, so return false and try again
+ return false
+ }
+ if resp == nil {
+ t.Fatal("expected warning")
+ }
+
+ retURL = connURL
+ return true
+ })
+
+ if connErr != nil {
+ t.Fatalf("could not connect to database: %v", connErr)
+ }
+
+ return
+}
+
+func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
+ err := cid.KillRemove()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackend_config_connection(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ configData := map[string]interface{}{
+ "value": "",
+ "connection_url": "sample_connection_url",
+ "max_open_connections": 9,
+ "max_idle_connections": 7,
+ "verify_connection": false,
+ }
+
+ configReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Storage: config.StorageView,
+ Data: configData,
+ }
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ configReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ delete(configData, "verify_connection")
+ if !reflect.DeepEqual(configData, resp.Data) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data)
+ }
+}
+
+func TestBackend_basic(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ // for wildcard based mysql user
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, connData, false),
+ testAccStepRole(t, true),
+ testAccStepReadCreds(t, "web"),
+ },
+ })
+}
+
+func TestBackend_basicHostRevoke(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ // for host based mysql user
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, connData, false),
+ testAccStepRole(t, false),
+ testAccStepReadCreds(t, "web"),
+ },
+ })
+}
+
+func TestBackend_roleCrud(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, connData, false),
+ // test SQL with wildcard based user
+ testAccStepRole(t, true),
+ testAccStepReadRole(t, "web", testRoleWildCard),
+ testAccStepDeleteRole(t, "web"),
+ // test SQL with host based user
+ testAccStepRole(t, false),
+ testAccStepReadRole(t, "web", testRoleHost),
+ testAccStepDeleteRole(t, "web"),
+ },
+ })
+}
+
+func TestBackend_leaseWriteRead(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, connData, false),
+ testAccStepWriteLease(t),
+ testAccStepReadLease(t),
+ },
+ })
+
+}
+
+func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Data: d,
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if expectError {
+ if resp.Data == nil {
+ return fmt.Errorf("data is nil")
+ }
+ var e struct {
+ Error string `mapstructure:"error"`
+ }
+ if err := mapstructure.Decode(resp.Data, &e); err != nil {
+ return err
+ }
+ if len(e.Error) == 0 {
+ return fmt.Errorf("expected error, but write succeeded")
+ }
+ return nil
+ } else if resp != nil && resp.IsError() {
+ return fmt.Errorf("got an error response: %v", resp.Error())
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepRole(t *testing.T, wildCard bool) logicaltest.TestStep {
+
+ pathData := make(map[string]interface{})
+	if wildCard {
+ pathData = map[string]interface{}{
+ "sql": testRoleWildCard,
+ }
+ } else {
+ pathData = map[string]interface{}{
+ "sql": testRoleHost,
+ "revocation_sql": testRevocationSQL,
+ }
+ }
+
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/web",
+ Data: pathData,
+ }
+
+}
+
+func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "roles/" + n,
+ }
+}
+
+func testAccStepReadCreds(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "creds/" + name,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Username string `mapstructure:"username"`
+ Password string `mapstructure:"password"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ log.Printf("[WARN] Generated credentials: %v", d)
+
+ return nil
+ },
+ }
+}
+
+func testAccStepReadRole(t *testing.T, name string, sql string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "roles/" + name,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if sql == "" {
+ return nil
+ }
+
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var d struct {
+ SQL string `mapstructure:"sql"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.SQL != sql {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepWriteLease(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/lease",
+ Data: map[string]interface{}{
+ "lease": "1h5m",
+ "lease_max": "24h",
+ },
+ }
+}
+
+func testAccStepReadLease(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "config/lease",
+ Check: func(resp *logical.Response) error {
+ if resp.Data["lease"] != "1h5m0s" || resp.Data["lease_max"] != "24h0m0s" {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ return nil
+ },
+ }
+}
+
+const testRoleWildCard = `
+CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
+GRANT SELECT ON *.* TO '{{name}}'@'%';
+`
+const testRoleHost = `
+CREATE USER '{{name}}'@'10.1.1.2' IDENTIFIED BY '{{password}}';
+GRANT SELECT ON *.* TO '{{name}}'@'10.1.1.2';
+`
+const testRevocationSQL = `
+REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'10.1.1.2';
+DROP USER '{{name}}'@'10.1.1.2';
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_config_connection.go
new file mode 100644
index 0000000..8dd44dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_config_connection.go
@@ -0,0 +1,157 @@
+package mysql
+
+import (
+ "database/sql"
+ "fmt"
+
+ "github.com/fatih/structs"
+ _ "github.com/go-sql-driver/mysql"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigConnection(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/connection",
+ Fields: map[string]*framework.FieldSchema{
+ "connection_url": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "DB connection string",
+ },
+ "value": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `DB connection string. Use 'connection_url' instead.
+This name is deprecated.`,
+ },
+ "max_open_connections": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: "Maximum number of open connections to database",
+ },
+ "max_idle_connections": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: "Maximum number of idle connections to the database; a zero uses the value of max_open_connections and a negative value disables idle connections. If larger than max_open_connections it will be reduced to the same size.",
+ },
+ "verify_connection": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: "If set, connection_url is verified by actually connecting to the database",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathConnectionWrite,
+ logical.ReadOperation: b.pathConnectionRead,
+ },
+
+ HelpSynopsis: pathConfigConnectionHelpSyn,
+ HelpDescription: pathConfigConnectionHelpDesc,
+ }
+}
+
+// pathConnectionRead reads out the connection configuration
+func (b *backend) pathConnectionRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ entry, err := req.Storage.Get("config/connection")
+ if err != nil {
+ return nil, fmt.Errorf("failed to read connection configuration")
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var config connectionConfig
+ if err := entry.DecodeJSON(&config); err != nil {
+ return nil, err
+ }
+ return &logical.Response{
+ Data: structs.New(config).Map(),
+ }, nil
+}
+
+func (b *backend) pathConnectionWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ connValue := data.Get("value").(string)
+ connURL := data.Get("connection_url").(string)
+ if connURL == "" {
+ if connValue == "" {
+ return logical.ErrorResponse("the connection_url parameter must be supplied"), nil
+ } else {
+ connURL = connValue
+ }
+ }
+
+ maxOpenConns := data.Get("max_open_connections").(int)
+ if maxOpenConns == 0 {
+ maxOpenConns = 2
+ }
+
+ maxIdleConns := data.Get("max_idle_connections").(int)
+ if maxIdleConns == 0 {
+ maxIdleConns = maxOpenConns
+ }
+ if maxIdleConns > maxOpenConns {
+ maxIdleConns = maxOpenConns
+ }
+
+ // Don't check the connection_url if verification is disabled
+ verifyConnection := data.Get("verify_connection").(bool)
+ if verifyConnection {
+ // Verify the string
+ db, err := sql.Open("mysql", connURL)
+
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "error validating connection info: %s", err)), nil
+ }
+ defer db.Close()
+ if err := db.Ping(); err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "error validating connection info: %s", err)), nil
+ }
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{
+ ConnectionURL: connURL,
+ MaxOpenConnections: maxOpenConns,
+ MaxIdleConnections: maxIdleConns,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ // Reset the DB connection
+ b.ResetDB()
+
+ resp := &logical.Response{}
+	resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection URL as-is, including any embedded password.")
+
+ return resp, nil
+}
+
+type connectionConfig struct {
+ ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"`
+ // Deprecate "value" in coming releases
+ ConnectionString string `json:"value" structs:"value" mapstructure:"value"`
+ MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"`
+ MaxIdleConnections int `json:"max_idle_connections" structs:"max_idle_connections" mapstructure:"max_idle_connections"`
+}
+
+const pathConfigConnectionHelpSyn = `
+Configure the connection string to talk to MySQL.
+`
+
+const pathConfigConnectionHelpDesc = `
+This path configures the connection string used to connect to MySQL. The value
+of the string is a Data Source Name (DSN), for example:
+"username:password@protocol(address)/dbname?param=value"
+
+For example, RDS may look like:
+"id:password@tcp(your-amazonaws-uri.com:3306)/dbname"
+
+When configuring the connection string, the backend will verify its validity.
+If the database is not available when setting the connection URL, set the
+"verify_connection" option to false.
+`
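The help text above describes the expected DSN format. As a hedged illustration (not part of this change), the following sketch configures the endpoint through the vendored github.com/hashicorp/vault/api client; the "mysql" mount path, the DSN, and the environment-based client setup are all assumptions:

    package main

    import (
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        // Assumes VAULT_ADDR and VAULT_TOKEN are picked up from the environment.
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        // Hypothetical DSN; set "verify_connection" to false when the database
        // is unreachable at configuration time.
        _, err = client.Logical().Write("mysql/config/connection", map[string]interface{}{
            "connection_url":    "vaultadmin:vaultpass@tcp(127.0.0.1:3306)/",
            "verify_connection": true,
        })
        if err != nil {
            log.Fatal(err)
        }
    }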
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_config_lease.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_config_lease.go
new file mode 100644
index 0000000..f386b60
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_config_lease.go
@@ -0,0 +1,103 @@
+package mysql
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigLease(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/lease",
+ Fields: map[string]*framework.FieldSchema{
+ "lease": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Default lease for roles.",
+ },
+
+ "lease_max": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Maximum time a credential is valid for.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathLeaseRead,
+ logical.UpdateOperation: b.pathLeaseWrite,
+ },
+
+ HelpSynopsis: pathConfigLeaseHelpSyn,
+ HelpDescription: pathConfigLeaseHelpDesc,
+ }
+}
+
+func (b *backend) pathLeaseWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ leaseRaw := d.Get("lease").(string)
+ leaseMaxRaw := d.Get("lease_max").(string)
+
+ lease, err := time.ParseDuration(leaseRaw)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Invalid lease: %s", err)), nil
+ }
+ leaseMax, err := time.ParseDuration(leaseMaxRaw)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Invalid lease: %s", err)), nil
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("config/lease", &configLease{
+ Lease: lease,
+ LeaseMax: leaseMax,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathLeaseRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ lease, err := b.Lease(req.Storage)
+
+ if err != nil {
+ return nil, err
+ }
+ if lease == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "lease": lease.Lease.String(),
+ "lease_max": lease.LeaseMax.String(),
+ },
+ }, nil
+}
+
+type configLease struct {
+ Lease time.Duration
+ LeaseMax time.Duration
+}
+
+const pathConfigLeaseHelpSyn = `
+Configure the default lease information for generated credentials.
+`
+
+const pathConfigLeaseHelpDesc = `
+This configures the default lease information used for credentials
+generated by this backend. The lease specifies how long a credential
+is valid for, as well as the maximum total lifetime to which a set of
+credentials can be renewed.
+
+The lease is specified as a duration string such as "1h": an integer
+followed by a unit, where the longest supported unit is the hour.
+`
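A small standard-library illustration of why the lease test earlier in this diff writes "1h5m" but reads back "1h5m0s": the backend stores time.Duration values, and the read handler renders them with String(), which always spells out minutes and seconds:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        lease, err := time.ParseDuration("1h5m")
        if err != nil {
            panic(err)
        }
        fmt.Println(lease.String()) // prints "1h5m0s"
    }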
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_role_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_role_create.go
new file mode 100644
index 0000000..7e11657
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_role_create.go
@@ -0,0 +1,144 @@
+package mysql
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+	_ "github.com/go-sql-driver/mysql" // register the MySQL driver used by this backend
+)
+
+func pathRoleCreate(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "creds/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRoleCreateRead,
+ },
+
+ HelpSynopsis: pathRoleCreateReadHelpSyn,
+ HelpDescription: pathRoleCreateReadHelpDesc,
+ }
+}
+
+func (b *backend) pathRoleCreateRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+
+ // Get the role
+ role, err := b.Role(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
+ }
+
+ // Determine if we have a lease
+ lease, err := b.Lease(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if lease == nil {
+ lease = &configLease{}
+ }
+
+	// Generate our username and password. The username will be a
+	// concatenation of:
+	//
+	// - the role name, truncated to role.rolenameLength (default 4)
+	// - the token display name, truncated to role.displaynameLength (default 4)
+	// - a UUID
+	//
+	// The entire concatenated string is then truncated to role.usernameLength,
+	// which by default is 16 due to limitations in older but still-prevalent
+	// versions of MySQL.
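+	// For example (illustrative values only): role "readonly" and token
+	// display name "token" yield "read-toke-<uuid>", which the default
+	// usernameLength of 16 truncates to something like "read-toke-5f2b1c".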
+ roleName := name
+ if len(roleName) > role.RolenameLength {
+ roleName = roleName[:role.RolenameLength]
+ }
+ displayName := req.DisplayName
+ if len(displayName) > role.DisplaynameLength {
+ displayName = displayName[:role.DisplaynameLength]
+ }
+ userUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+ username := fmt.Sprintf("%s-%s-%s", roleName, displayName, userUUID)
+ if len(username) > role.UsernameLength {
+ username = username[:role.UsernameLength]
+ }
+ password, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get our handle
+ db, err := b.DB(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Start a transaction
+ tx, err := db.Begin()
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+
+ // Execute each query
+ for _, query := range strutil.ParseArbitraryStringSlice(role.SQL, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := tx.Prepare(Query(query, map[string]string{
+ "name": username,
+ "password": password,
+ }))
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return nil, err
+ }
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(); err != nil {
+ return nil, err
+ }
+
+ // Return the secret
+ resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
+ "username": username,
+ "password": password,
+ }, map[string]interface{}{
+ "username": username,
+ "role": name,
+ })
+ resp.Secret.TTL = lease.Lease
+ return resp, nil
+}
+
+const pathRoleCreateReadHelpSyn = `
+Request database credentials for a certain role.
+`
+
+const pathRoleCreateReadHelpDesc = `
+This path reads database credentials for a certain role. The
+database credentials will be generated on demand and will be automatically
+revoked when the lease is up.
+`
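As a hedged usage sketch (the "mysql" mount path and the "readonly" role name are assumptions), reading creds/<role> through the API client returns the generated pair together with its lease:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        // Triggers pathRoleCreateRead above: a user is created in MySQL and
        // returned with a lease that Vault revokes on expiry.
        secret, err := client.Logical().Read("mysql/creds/readonly")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(secret.Data["username"], secret.Data["password"])
        fmt.Println("lease:", secret.LeaseID, secret.LeaseDuration)
    }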
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_roles.go
new file mode 100644
index 0000000..97feccd
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_roles.go
@@ -0,0 +1,233 @@
+package mysql
+
+import (
+ "fmt"
+ "strings"
+
+ _ "github.com/go-sql-driver/mysql"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathListRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList,
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func pathRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": {
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+
+ "sql": {
+ Type: framework.TypeString,
+ Description: "SQL string to create a user. See help for more info.",
+ },
+
+ "revocation_sql": {
+ Type: framework.TypeString,
+ Description: "SQL string to revoke a user. See help for more info.",
+ },
+
+ "username_length": {
+ Type: framework.TypeInt,
+ Description: "number of characters to truncate generated mysql usernames to (default 16)",
+ Default: 16,
+ },
+
+ "rolename_length": {
+ Type: framework.TypeInt,
+ Description: "number of characters to truncate the rolename portion of generated mysql usernames to (default 4)",
+ Default: 4,
+ },
+
+ "displayname_length": {
+ Type: framework.TypeInt,
+ Description: "number of characters to truncate the displayname portion of generated mysql usernames to (default 4)",
+ Default: 4,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRoleRead,
+ logical.UpdateOperation: b.pathRoleCreate,
+ logical.DeleteOperation: b.pathRoleDelete,
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
+ entry, err := s.Get("role/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ // Set defaults to handle upgrade cases
+ result := roleEntry{
+ UsernameLength: 16,
+ RolenameLength: 4,
+ DisplaynameLength: 4,
+ }
+
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathRoleDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("role/" + data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathRoleRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ role, err := b.Role(req.Storage, data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "sql": role.SQL,
+ "revocation_sql": role.RevocationSQL,
+ },
+ }, nil
+}
+
+func (b *backend) pathRoleList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entries, err := req.Storage.List("role/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(entries), nil
+}
+
+func (b *backend) pathRoleCreate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+
+ // Get our connection
+ db, err := b.DB(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Test the query by trying to prepare it
+ sql := data.Get("sql").(string)
+ for _, query := range strutil.ParseArbitraryStringSlice(sql, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := db.Prepare(Query(query, map[string]string{
+ "name": "foo",
+ "password": "bar",
+ }))
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error testing query: %s", err)), nil
+ }
+ stmt.Close()
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
+ SQL: sql,
+ RevocationSQL: data.Get("revocation_sql").(string),
+ UsernameLength: data.Get("username_length").(int),
+ DisplaynameLength: data.Get("displayname_length").(int),
+ RolenameLength: data.Get("rolename_length").(int),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+type roleEntry struct {
+ SQL string `json:"sql" mapstructure:"sql" structs:"sql"`
+ RevocationSQL string `json:"revocation_sql" mapstructure:"revocation_sql" structs:"revocation_sql"`
+ UsernameLength int `json:"username_length" mapstructure:"username_length" structs:"username_length"`
+ DisplaynameLength int `json:"displayname_length" mapstructure:"displayname_length" structs:"displayname_length"`
+ RolenameLength int `json:"rolename_length" mapstructure:"rolename_length" structs:"rolename_length"`
+}
+
+const pathRoleHelpSyn = `
+Manage the roles that can be created with this backend.
+`
+
+const pathRoleHelpDesc = `
+This path lets you manage the roles that can be created with this backend.
+
+The "sql" parameter customizes the SQL string used to create the role.
+This can be a sequence of SQL queries separated by semicolons. Certain
+variables will be substituted into the SQL string; their names must be
+surrounded by "{{" and "}}" to be replaced.
+
+ * "name" - The random username generated for the DB user.
+
+ * "password" - The random password generated for the DB user.
+
+Example of a decent SQL query to use:
+
+ CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
+ GRANT ALL ON db1.* TO '{{name}}'@'%';
+
+Note that the above user would be able to access anything in db1. See the
+MySQL manual on the GRANT command to learn how to grant more fine-grained
+access.
+
+The "rolename_length" parameter determines how many characters of the role name
+will be used in creating the generated mysql username; the default is 4.
+
+The "displayname_length" parameter determines how many characters of the token
+display name will be used in creating the generated mysql username; the default
+is 4.
+
+The "username_length" parameter determines how many total characters the
+generated username (including the role name, token display name and the uuid
+portion) will be truncated to. Versions of MySQL prior to 5.7.8 are limited to
+16 characters total (see
+http://dev.mysql.com/doc/refman/5.7/en/user-names.html) so that is the default;
+for versions >=5.7.8 it is safe to increase this to 32.
+
+For best readability in MySQL process lists, we recommend using MySQL 5.7.8 or
+later, setting "username_length" to 32 and setting both "rolename_length" and
+"displayname_length" to 8. However due the the prevalence of older versions of
+MySQL in general deployment, the defaults are currently tuned for a
+username_length of 16.
+`
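To make the templating described in the help concrete, here is a hedged sketch of defining a role over the API (mount path and role name are assumptions; the SQL mirrors the documented example):

    package main

    import (
        "log"

        "github.com/hashicorp/vault/api"
    )

    const createSQL = `
    CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
    GRANT SELECT ON db1.* TO '{{name}}'@'%';
    `

    const revokeSQL = `
    REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%';
    DROP USER '{{name}}'@'%';
    `

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        // {{name}} and {{password}} are substituted by the backend when
        // credentials are generated for this role.
        _, err = client.Logical().Write("mysql/roles/readonly", map[string]interface{}{
            "sql":            createSQL,
            "revocation_sql": revokeSQL,
        })
        if err != nil {
            log.Fatal(err)
        }
    }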
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go
new file mode 100644
index 0000000..b8d6513
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go
@@ -0,0 +1,132 @@
+package mysql
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const SecretCredsType = "creds"
+
+// defaultRevocationSQL is a default SQL statement for revoking a user. Revoking
+// permissions for the user is done before the drop, because MySQL explicitly
+// documents that open user connections will not be closed. By revoking all
+// grants, at least we ensure that the open connection is useless. Dropping the
+// user will only affect the next connection.
+const defaultRevocationSQL = `
+REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%';
+DROP USER '{{name}}'@'%'
+`
+
+func secretCreds(b *backend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretCredsType,
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username",
+ },
+
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Password",
+ },
+ },
+
+ Renew: b.secretCredsRenew,
+ Revoke: b.secretCredsRevoke,
+ }
+}
+
+func (b *backend) secretCredsRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Get the lease information
+ lease, err := b.Lease(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if lease == nil {
+ lease = &configLease{}
+ }
+
+ f := framework.LeaseExtend(lease.Lease, lease.LeaseMax, b.System())
+ return f(req, d)
+}
+
+func (b *backend) secretCredsRevoke(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ var resp *logical.Response
+
+ // Get the username from the internal data
+ usernameRaw, ok := req.Secret.InternalData["username"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing username internal data")
+ }
+	username, ok := usernameRaw.(string)
+	if !ok {
+		return nil, fmt.Errorf("username internal data is not a string")
+	}
+
+ // Get our connection
+ db, err := b.DB(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ roleName := ""
+ roleNameRaw, ok := req.Secret.InternalData["role"]
+ if ok {
+ roleName = roleNameRaw.(string)
+ }
+
+ var role *roleEntry
+ if roleName != "" {
+ role, err = b.Role(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Use a default SQL statement for revocation if one cannot be fetched from the role
+ revocationSQL := defaultRevocationSQL
+
+ if role != nil && role.RevocationSQL != "" {
+ revocationSQL = role.RevocationSQL
+ } else {
+ if resp == nil {
+ resp = &logical.Response{}
+ }
+ resp.AddWarning(fmt.Sprintf("Role %q cannot be found. Using default SQL for revoking user.", roleName))
+ }
+
+ // Start a transaction
+ tx, err := db.Begin()
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+
+ for _, query := range strutil.ParseArbitraryStringSlice(revocationSQL, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+		// This is not a prepared statement because not all commands are
+		// supported by the prepared-statement protocol (MySQL error 1295:
+		// "This command is not supported in the prepared statement protocol yet").
+		// Reference: https://mariadb.com/kb/en/mariadb/prepare-statement/
+ query = strings.Replace(query, "{{name}}", username, -1)
+ _, err = tx.Exec(query)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(); err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
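For completeness, a hedged client-side sketch of triggering the revocation path above (the lease ID is a hypothetical value captured from an earlier credential read):

    package main

    import (
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        // Hypothetical lease ID from a previous read of mysql/creds/<role>.
        leaseID := "mysql/creds/readonly/4f8cd170-example"

        // Revoking the lease invokes secretCredsRevoke, which runs the role's
        // revocation_sql (or defaultRevocationSQL) inside a transaction.
        if err := client.Sys().Revoke(leaseID); err != nil {
            log.Fatal(err)
        }
    }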
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/util.go
new file mode 100644
index 0000000..313264f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/util.go
@@ -0,0 +1,15 @@
+package mysql
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Query templates a query for us.
+func Query(tpl string, data map[string]string) string {
+ for k, v := range data {
+ tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
+ }
+
+ return tpl
+}
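Query is plain string replacement with no SQL escaping, which is why the backend only ever substitutes values it generated itself. A standalone equivalent for illustration:

    package main

    import (
        "fmt"
        "strings"
    )

    // query mirrors mysql.Query above: naive "{{key}}" substitution.
    func query(tpl string, data map[string]string) string {
        for k, v := range data {
            tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
        }
        return tpl
    }

    func main() {
        fmt.Println(query(
            "CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';",
            map[string]string{"name": "read-toke-5f2b1c", "password": "s3cr3t"},
        ))
    }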
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go
new file mode 100644
index 0000000..6128028
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go
@@ -0,0 +1,86 @@
+package pki
+
+import (
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// Factory creates a new backend implementing the logical.Backend interface
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+// Backend returns a new Backend framework struct
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(backendHelp),
+
+ PathsSpecial: &logical.Paths{
+ Unauthenticated: []string{
+ "cert/*",
+ "ca/pem",
+ "ca_chain",
+ "ca",
+ "crl/pem",
+ "crl",
+ },
+
+ LocalStorage: []string{
+ "revoked/",
+ "crl",
+ "certs/",
+ },
+ },
+
+ Paths: []*framework.Path{
+ pathListRoles(&b),
+ pathRoles(&b),
+ pathGenerateRoot(&b),
+ pathGenerateIntermediate(&b),
+ pathSetSignedIntermediate(&b),
+ pathSignIntermediate(&b),
+ pathConfigCA(&b),
+ pathConfigCRL(&b),
+ pathConfigURLs(&b),
+ pathSignVerbatim(&b),
+ pathSign(&b),
+ pathIssue(&b),
+ pathRotateCRL(&b),
+ pathFetchCA(&b),
+ pathFetchCAChain(&b),
+ pathFetchCRL(&b),
+ pathFetchCRLViaCertPath(&b),
+ pathFetchValid(&b),
+ pathFetchListCerts(&b),
+ pathRevoke(&b),
+ pathTidy(&b),
+ },
+
+ Secrets: []*framework.Secret{
+ secretCerts(&b),
+ },
+ }
+
+ b.crlLifetime = time.Hour * 72
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+
+ crlLifetime time.Duration
+ revokeStorageLock sync.RWMutex
+}
+
+const backendHelp = `
+The PKI backend dynamically generates X509 server and client certificates.
+
+After mounting this backend, configure the CA using the "pem_bundle" endpoint within
+the "config/" path.
+`
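As with the MySQL backend, a hedged sketch of the CA configuration the help text refers to (the "pki" mount path and the bundle file name are assumptions; the endpoint expects the private key and certificate concatenated in PEM form):

    package main

    import (
        "io/ioutil"
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        // Hypothetical file holding the CA private key followed by its
        // certificate, both PEM-encoded.
        bundle, err := ioutil.ReadFile("ca-bundle.pem")
        if err != nil {
            log.Fatal(err)
        }

        _, err = client.Logical().Write("pki/config/ca", map[string]interface{}{
            "pem_bundle": string(bundle),
        })
        if err != nil {
            log.Fatal(err)
        }
    }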
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go
new file mode 100644
index 0000000..4fefc95
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go
@@ -0,0 +1,2284 @@
+package pki
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/base64"
+ "encoding/pem"
+ "fmt"
+ "math"
+ mathrand "math/rand"
+ "net"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/mitchellh/mapstructure"
+)
+
+var (
+ stepCount = 0
+ serialUnderTest string
+ parsedKeyUsageUnderTest int
+)
+
+// Performs basic tests on CA functionality
+// Uses the RSA CA key
+func TestBackend_RSAKey(t *testing.T) {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 32
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{},
+ }
+
+ stepCount = len(testCase.Steps)
+
+ intdata := map[string]interface{}{}
+ reqdata := map[string]interface{}{}
+ testCase.Steps = append(testCase.Steps, generateCATestingSteps(t, rsaCACert, rsaCAKey, ecCACert, intdata, reqdata)...)
+
+ logicaltest.Test(t, testCase)
+}
+
+// Performs basic tests on CA functionality
+// Uses the EC CA key
+func TestBackend_ECKey(t *testing.T) {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 32
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{},
+ }
+
+ stepCount = len(testCase.Steps)
+
+ intdata := map[string]interface{}{}
+ reqdata := map[string]interface{}{}
+ testCase.Steps = append(testCase.Steps, generateCATestingSteps(t, ecCACert, ecCAKey, rsaCACert, intdata, reqdata)...)
+
+ logicaltest.Test(t, testCase)
+}
+
+func TestBackend_CSRValues(t *testing.T) {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 32
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{},
+ }
+
+ stepCount = len(testCase.Steps)
+
+ intdata := map[string]interface{}{}
+ reqdata := map[string]interface{}{}
+ testCase.Steps = append(testCase.Steps, generateCSRSteps(t, ecCACert, ecCAKey, intdata, reqdata)...)
+
+ logicaltest.Test(t, testCase)
+}
+
+func TestBackend_URLsCRUD(t *testing.T) {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 32
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{},
+ }
+
+ stepCount = len(testCase.Steps)
+
+ intdata := map[string]interface{}{}
+ reqdata := map[string]interface{}{}
+ testCase.Steps = append(testCase.Steps, generateURLSteps(t, ecCACert, ecCAKey, intdata, reqdata)...)
+
+ logicaltest.Test(t, testCase)
+}
+
+// Generates and tests steps that walk through the various possibilities
+// of role flags to ensure that they are properly restricted
+// Uses the RSA CA key
+func TestBackend_RSARoles(t *testing.T) {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 32
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ Data: map[string]interface{}{
+ "pem_bundle": rsaCAKey + rsaCACert,
+ },
+ },
+ },
+ }
+
+ stepCount = len(testCase.Steps)
+
+ testCase.Steps = append(testCase.Steps, generateRoleSteps(t, false)...)
+ if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 {
+ for i, v := range testCase.Steps {
+ fmt.Printf("Step %d:\n%+v\n\n", i+stepCount, v)
+ }
+ }
+
+ logicaltest.Test(t, testCase)
+}
+
+// Generates and tests steps that walk through the various possibilities
+// of role flags to ensure that they are properly restricted
+// Uses the RSA CA key
+func TestBackend_RSARoles_CSR(t *testing.T) {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 32
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ Data: map[string]interface{}{
+ "pem_bundle": rsaCAKey + rsaCACert + rsaCAChain,
+ },
+ },
+ },
+ }
+
+ stepCount = len(testCase.Steps)
+
+ testCase.Steps = append(testCase.Steps, generateRoleSteps(t, true)...)
+ if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 {
+ for i, v := range testCase.Steps {
+ fmt.Printf("Step %d:\n%+v\n\n", i+stepCount, v)
+ }
+ }
+
+ logicaltest.Test(t, testCase)
+}
+
+// Generates and tests steps that walk through the various possibilities
+// of role flags to ensure that they are properly restricted
+// Uses the EC CA key
+func TestBackend_ECRoles(t *testing.T) {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 32
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ Data: map[string]interface{}{
+ "pem_bundle": ecCAKey + ecCACert,
+ },
+ },
+ },
+ }
+
+ stepCount = len(testCase.Steps)
+
+ testCase.Steps = append(testCase.Steps, generateRoleSteps(t, false)...)
+ if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 {
+ for i, v := range testCase.Steps {
+ fmt.Printf("Step %d:\n%+v\n\n", i+stepCount, v)
+ }
+ }
+
+ logicaltest.Test(t, testCase)
+}
+
+// Generates and tests steps that walk through the various possibilities
+// of role flags to ensure that they are properly restricted
+// Uses the EC CA key
+func TestBackend_ECRoles_CSR(t *testing.T) {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 32
+ b, err := Factory(&logical.BackendConfig{
+ Logger: nil,
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Unable to create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ Data: map[string]interface{}{
+ "pem_bundle": ecCAKey + ecCACert,
+ },
+ },
+ },
+ }
+
+ stepCount = len(testCase.Steps)
+
+ testCase.Steps = append(testCase.Steps, generateRoleSteps(t, true)...)
+ if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 {
+ for i, v := range testCase.Steps {
+ fmt.Printf("Step %d:\n%+v\n\n", i+stepCount, v)
+ }
+ }
+
+ logicaltest.Test(t, testCase)
+}
+
+// Performs some validity checking on the returned bundles
+func checkCertsAndPrivateKey(keyType string, key crypto.Signer, usage x509.KeyUsage, extUsage x509.ExtKeyUsage, validity time.Duration, certBundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) {
+ parsedCertBundle, err := certBundle.ToParsedCertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing cert bundle: %s", err)
+ }
+
+ if key != nil {
+ switch keyType {
+ case "rsa":
+ parsedCertBundle.PrivateKeyType = certutil.RSAPrivateKey
+ parsedCertBundle.PrivateKey = key
+ parsedCertBundle.PrivateKeyBytes = x509.MarshalPKCS1PrivateKey(key.(*rsa.PrivateKey))
+ case "ec":
+ parsedCertBundle.PrivateKeyType = certutil.ECPrivateKey
+ parsedCertBundle.PrivateKey = key
+ parsedCertBundle.PrivateKeyBytes, err = x509.MarshalECPrivateKey(key.(*ecdsa.PrivateKey))
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing EC key: %s", err)
+ }
+ }
+ }
+
+ switch {
+ case parsedCertBundle.Certificate == nil:
+ return nil, fmt.Errorf("Did not find a certificate in the cert bundle")
+ case len(parsedCertBundle.CAChain) == 0 || parsedCertBundle.CAChain[0].Certificate == nil:
+ return nil, fmt.Errorf("Did not find a CA in the cert bundle")
+ case parsedCertBundle.PrivateKey == nil:
+ return nil, fmt.Errorf("Did not find a private key in the cert bundle")
+ case parsedCertBundle.PrivateKeyType == certutil.UnknownPrivateKey:
+ return nil, fmt.Errorf("Could not figure out type of private key")
+ }
+
+ switch {
+ case parsedCertBundle.PrivateKeyType == certutil.RSAPrivateKey && keyType != "rsa":
+ fallthrough
+ case parsedCertBundle.PrivateKeyType == certutil.ECPrivateKey && keyType != "ec":
+ return nil, fmt.Errorf("Given key type does not match type found in bundle")
+ }
+
+ cert := parsedCertBundle.Certificate
+
+ if usage != cert.KeyUsage {
+ return nil, fmt.Errorf("Expected usage of %#v, got %#v; ext usage is %#v", usage, cert.KeyUsage, cert.ExtKeyUsage)
+ }
+
+ // There should only be one ext usage type, because only one is requested
+ // in the tests
+ if len(cert.ExtKeyUsage) != 1 {
+ return nil, fmt.Errorf("Got wrong size key usage in generated cert; expected 1, values are %#v", cert.ExtKeyUsage)
+ }
+ switch extUsage {
+ case x509.ExtKeyUsageEmailProtection:
+ if cert.ExtKeyUsage[0] != x509.ExtKeyUsageEmailProtection {
+ return nil, fmt.Errorf("Bad extended key usage")
+ }
+ case x509.ExtKeyUsageServerAuth:
+ if cert.ExtKeyUsage[0] != x509.ExtKeyUsageServerAuth {
+ return nil, fmt.Errorf("Bad extended key usage")
+ }
+ case x509.ExtKeyUsageClientAuth:
+ if cert.ExtKeyUsage[0] != x509.ExtKeyUsageClientAuth {
+ return nil, fmt.Errorf("Bad extended key usage")
+ }
+ case x509.ExtKeyUsageCodeSigning:
+ if cert.ExtKeyUsage[0] != x509.ExtKeyUsageCodeSigning {
+ return nil, fmt.Errorf("Bad extended key usage")
+ }
+ }
+
+ // 40 seconds since we add 30 second slack for clock skew
+ if math.Abs(float64(time.Now().Unix()-cert.NotBefore.Unix())) > 40 {
+ return nil, fmt.Errorf("Validity period starts out of range")
+ }
+ if !cert.NotBefore.Before(time.Now().Add(-10 * time.Second)) {
+ return nil, fmt.Errorf("Validity period not far enough in the past")
+ }
+
+ if math.Abs(float64(time.Now().Add(validity).Unix()-cert.NotAfter.Unix())) > 10 {
+		return nil, fmt.Errorf("Validity period end of %d is more than 10 seconds from the expected value", cert.NotAfter.Unix())
+ }
+
+ return parsedCertBundle, nil
+}
+
+func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[string]interface{}) []logicaltest.TestStep {
+ expected := urlEntries{
+ IssuingCertificates: []string{
+ "http://example.com/ca1",
+ "http://example.com/ca2",
+ },
+ CRLDistributionPoints: []string{
+ "http://example.com/crl1",
+ "http://example.com/crl2",
+ },
+ OCSPServers: []string{
+ "http://example.com/ocsp1",
+ "http://example.com/ocsp2",
+ },
+ }
+ csrTemplate := x509.CertificateRequest{
+ Subject: pkix.Name{
+ CommonName: "my@example.com",
+ },
+ }
+
+ priv1024, _ := rsa.GenerateKey(rand.Reader, 1024)
+ csr1024, _ := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, priv1024)
+ csrPem1024 := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE REQUEST",
+ Bytes: csr1024,
+ })
+
+ priv2048, _ := rsa.GenerateKey(rand.Reader, 2048)
+ csr2048, _ := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, priv2048)
+ csrPem2048 := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE REQUEST",
+ Bytes: csr2048,
+ })
+
+ ret := []logicaltest.TestStep{
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "root/generate/exported",
+ Data: map[string]interface{}{
+ "common_name": "Root Cert",
+ "ttl": "180h",
+ },
+ Check: func(resp *logical.Response) error {
+ if resp.Secret != nil && resp.Secret.LeaseID != "" {
+ return fmt.Errorf("root returned with a lease")
+ }
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/urls",
+ Data: map[string]interface{}{
+ "issuing_certificates": strings.Join(expected.IssuingCertificates, ","),
+ "crl_distribution_points": strings.Join(expected.CRLDistributionPoints, ","),
+ "ocsp_servers": strings.Join(expected.OCSPServers, ","),
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "config/urls",
+ Check: func(resp *logical.Response) error {
+ if resp.Data == nil {
+ return fmt.Errorf("no data returned")
+ }
+ var entries urlEntries
+ err := mapstructure.Decode(resp.Data, &entries)
+ if err != nil {
+ return err
+ }
+
+ if !reflect.DeepEqual(entries, expected) {
+ return fmt.Errorf("expected urls\n%#v\ndoes not match provided\n%#v\n", expected, entries)
+ }
+
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "root/sign-intermediate",
+ Data: map[string]interface{}{
+ "common_name": "Intermediate Cert",
+ "csr": string(csrPem1024),
+ "format": "der",
+ },
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if !resp.IsError() {
+ return fmt.Errorf("expected an error response but did not get one")
+ }
+ if !strings.Contains(resp.Data["error"].(string), "2048") {
+					return fmt.Errorf("received an error but not one about the 2048-bit minimum, error was: %s", resp.Data["error"].(string))
+ }
+
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "root/sign-intermediate",
+ Data: map[string]interface{}{
+ "common_name": "Intermediate Cert",
+ "csr": string(csrPem2048),
+ "format": "der",
+ },
+ Check: func(resp *logical.Response) error {
+ certString := resp.Data["certificate"].(string)
+ if certString == "" {
+ return fmt.Errorf("no certificate returned")
+ }
+ if resp.Secret != nil && resp.Secret.LeaseID != "" {
+ return fmt.Errorf("signed intermediate returned with a lease")
+ }
+ certBytes, _ := base64.StdEncoding.DecodeString(certString)
+ certs, err := x509.ParseCertificates(certBytes)
+ if err != nil {
+ return fmt.Errorf("returned cert cannot be parsed: %v", err)
+ }
+ if len(certs) != 1 {
+ return fmt.Errorf("unexpected returned length of certificates: %d", len(certs))
+ }
+ cert := certs[0]
+
+ switch {
+ case !reflect.DeepEqual(expected.IssuingCertificates, cert.IssuingCertificateURL):
+ return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.IssuingCertificates, cert.IssuingCertificateURL)
+ case !reflect.DeepEqual(expected.CRLDistributionPoints, cert.CRLDistributionPoints):
+ return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.CRLDistributionPoints, cert.CRLDistributionPoints)
+ case !reflect.DeepEqual(expected.OCSPServers, cert.OCSPServer):
+ return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.OCSPServers, cert.OCSPServer)
+ case !reflect.DeepEqual([]string{"Intermediate Cert"}, cert.DNSNames):
+ return fmt.Errorf("expected\n%#v\ngot\n%#v\n", []string{"Intermediate Cert"}, cert.DNSNames)
+ }
+
+ return nil
+ },
+ },
+
+ // Same as above but exclude adding to sans
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "root/sign-intermediate",
+ Data: map[string]interface{}{
+ "common_name": "Intermediate Cert",
+ "csr": string(csrPem2048),
+ "format": "der",
+ "exclude_cn_from_sans": true,
+ },
+ Check: func(resp *logical.Response) error {
+ certString := resp.Data["certificate"].(string)
+ if certString == "" {
+ return fmt.Errorf("no certificate returned")
+ }
+ if resp.Secret != nil && resp.Secret.LeaseID != "" {
+ return fmt.Errorf("signed intermediate returned with a lease")
+ }
+ certBytes, _ := base64.StdEncoding.DecodeString(certString)
+ certs, err := x509.ParseCertificates(certBytes)
+ if err != nil {
+ return fmt.Errorf("returned cert cannot be parsed: %v", err)
+ }
+ if len(certs) != 1 {
+ return fmt.Errorf("unexpected returned length of certificates: %d", len(certs))
+ }
+ cert := certs[0]
+
+ switch {
+ case !reflect.DeepEqual(expected.IssuingCertificates, cert.IssuingCertificateURL):
+ return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.IssuingCertificates, cert.IssuingCertificateURL)
+ case !reflect.DeepEqual(expected.CRLDistributionPoints, cert.CRLDistributionPoints):
+ return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.CRLDistributionPoints, cert.CRLDistributionPoints)
+ case !reflect.DeepEqual(expected.OCSPServers, cert.OCSPServer):
+ return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.OCSPServers, cert.OCSPServer)
+ case !reflect.DeepEqual([]string(nil), cert.DNSNames):
+ return fmt.Errorf("expected\n%#v\ngot\n%#v\n", []string(nil), cert.DNSNames)
+ }
+
+ return nil
+ },
+ },
+ }
+ return ret
+}
+
+func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[string]interface{}) []logicaltest.TestStep {
+ csrTemplate := x509.CertificateRequest{
+ Subject: pkix.Name{
+ Country: []string{"MyCountry"},
+ PostalCode: []string{"MyPostalCode"},
+ SerialNumber: "MySerialNumber",
+ CommonName: "my@example.com",
+ },
+ DNSNames: []string{
+ "name1.example.com",
+ "name2.example.com",
+ "name3.example.com",
+ },
+ EmailAddresses: []string{
+ "name1@example.com",
+ "name2@example.com",
+ "name3@example.com",
+ },
+ IPAddresses: []net.IP{
+ net.ParseIP("::ff:1:2:3:4"),
+ net.ParseIP("::ff:5:6:7:8"),
+ },
+ }
+
+ priv, _ := rsa.GenerateKey(rand.Reader, 2048)
+ csr, _ := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, priv)
+ csrPem := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE REQUEST",
+ Bytes: csr,
+ })
+
+ ret := []logicaltest.TestStep{
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "root/generate/exported",
+ Data: map[string]interface{}{
+ "common_name": "Root Cert",
+ "ttl": "180h",
+ "max_path_length": 0,
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "root/sign-intermediate",
+ Data: map[string]interface{}{
+ "use_csr_values": true,
+ "csr": string(csrPem),
+ "format": "der",
+ },
+ ErrorOk: true,
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "root/generate/exported",
+ Data: map[string]interface{}{
+ "common_name": "Root Cert",
+ "ttl": "180h",
+ "max_path_length": 1,
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "root/sign-intermediate",
+ Data: map[string]interface{}{
+ "use_csr_values": true,
+ "csr": string(csrPem),
+ "format": "der",
+ },
+ Check: func(resp *logical.Response) error {
+ certString := resp.Data["certificate"].(string)
+ if certString == "" {
+ return fmt.Errorf("no certificate returned")
+ }
+ certBytes, _ := base64.StdEncoding.DecodeString(certString)
+ certs, err := x509.ParseCertificates(certBytes)
+ if err != nil {
+ return fmt.Errorf("returned cert cannot be parsed: %v", err)
+ }
+ if len(certs) != 1 {
+ return fmt.Errorf("unexpected returned length of certificates: %d", len(certs))
+ }
+ cert := certs[0]
+
+ if cert.MaxPathLen != 0 {
+				return fmt.Errorf("max path length of %d does not match the expected value of 0", cert.MaxPathLen)
+ }
+ if !cert.MaxPathLenZero {
+ return fmt.Errorf("max path length zero is not set")
+ }
+
+ // We need to set these as they are filled in with unparsed values in the final cert
+ csrTemplate.Subject.Names = cert.Subject.Names
+ csrTemplate.Subject.ExtraNames = cert.Subject.ExtraNames
+
+ switch {
+ case !reflect.DeepEqual(cert.Subject, csrTemplate.Subject):
+ return fmt.Errorf("cert subject\n%#v\ndoes not match csr subject\n%#v\n", cert.Subject, csrTemplate.Subject)
+ case !reflect.DeepEqual(cert.DNSNames, csrTemplate.DNSNames):
+ return fmt.Errorf("cert dns names\n%#v\ndoes not match csr dns names\n%#v\n", cert.DNSNames, csrTemplate.DNSNames)
+ case !reflect.DeepEqual(cert.EmailAddresses, csrTemplate.EmailAddresses):
+ return fmt.Errorf("cert email addresses\n%#v\ndoes not match csr email addresses\n%#v\n", cert.EmailAddresses, csrTemplate.EmailAddresses)
+ case !reflect.DeepEqual(cert.IPAddresses, csrTemplate.IPAddresses):
+ return fmt.Errorf("cert ip addresses\n%#v\ndoes not match csr ip addresses\n%#v\n", cert.IPAddresses, csrTemplate.IPAddresses)
+ }
+ return nil
+ },
+ },
+ }
+ return ret
+}
+
+// Generates steps to test out CA configuration -- certificates + CRL expiry,
+// and ensure that the certificates are readable after storing them
+func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, intdata, reqdata map[string]interface{}) []logicaltest.TestStep {
+ setSerialUnderTest := func(req *logical.Request) error {
+ req.Path = serialUnderTest
+ return nil
+ }
+
+ ret := []logicaltest.TestStep{
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ Data: map[string]interface{}{
+ "pem_bundle": caKey + caCert,
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/crl",
+ Data: map[string]interface{}{
+ "expiry": "16h",
+ },
+ },
+
+ // Ensure we can fetch it back via unauthenticated means, in various formats
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "cert/ca",
+ Unauthenticated: true,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["certificate"].(string) != caCert {
+ return fmt.Errorf("CA certificate:\n%s\ndoes not match original:\n%s\n", resp.Data["certificate"].(string), caCert)
+ }
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "ca/pem",
+ Unauthenticated: true,
+ Check: func(resp *logical.Response) error {
+ rawBytes := resp.Data["http_raw_body"].([]byte)
+ if string(rawBytes) != caCert {
+ return fmt.Errorf("CA certificate:\n%s\ndoes not match original:\n%s\n", string(rawBytes), caCert)
+ }
+ if resp.Data["http_content_type"].(string) != "application/pkix-cert" {
+ return fmt.Errorf("Expected application/pkix-cert as content-type, but got %s", resp.Data["http_content_type"].(string))
+ }
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "ca",
+ Unauthenticated: true,
+ Check: func(resp *logical.Response) error {
+ rawBytes := resp.Data["http_raw_body"].([]byte)
+ pemBytes := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: rawBytes,
+ })
+ if string(pemBytes) != caCert {
+ return fmt.Errorf("CA certificate:\n%s\ndoes not match original:\n%s\n", string(pemBytes), caCert)
+ }
+ if resp.Data["http_content_type"].(string) != "application/pkix-cert" {
+ return fmt.Errorf("Expected application/pkix-cert as content-type, but got %s", resp.Data["http_content_type"].(string))
+ }
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "config/crl",
+ Check: func(resp *logical.Response) error {
+ if resp.Data["expiry"].(string) != "16h" {
+ return fmt.Errorf("CRL lifetimes do not match (got %s)", resp.Data["expiry"].(string))
+ }
+ return nil
+ },
+ },
+
+ // Ensure that both parts of the PEM bundle are required
+ // Here, just the cert
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ Data: map[string]interface{}{
+ "pem_bundle": caCert,
+ },
+ ErrorOk: true,
+ },
+
+ // Here, just the key
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ Data: map[string]interface{}{
+ "pem_bundle": caKey,
+ },
+ ErrorOk: true,
+ },
+
+ // Ensure we can fetch it back via unauthenticated means, in various formats
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "cert/ca",
+ Unauthenticated: true,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["certificate"].(string) != caCert {
+ return fmt.Errorf("CA certificate:\n%s\ndoes not match original:\n%s\n", resp.Data["certificate"].(string), caCert)
+ }
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "ca/pem",
+ Unauthenticated: true,
+ Check: func(resp *logical.Response) error {
+ rawBytes := resp.Data["http_raw_body"].([]byte)
+ if string(rawBytes) != caCert {
+ return fmt.Errorf("CA certificate:\n%s\ndoes not match original:\n%s\n", string(rawBytes), caCert)
+ }
+ if resp.Data["http_content_type"].(string) != "application/pkix-cert" {
+ return fmt.Errorf("Expected application/pkix-cert as content-type, but got %s", resp.Data["http_content_type"].(string))
+ }
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "ca",
+ Unauthenticated: true,
+ Check: func(resp *logical.Response) error {
+ rawBytes := resp.Data["http_raw_body"].([]byte)
+ pemBytes := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: rawBytes,
+ })
+ if string(pemBytes) != caCert {
+ return fmt.Errorf("CA certificate:\n%s\ndoes not match original:\n%s\n", string(pemBytes), caCert)
+ }
+ if resp.Data["http_content_type"].(string) != "application/pkix-cert" {
+ return fmt.Errorf("Expected application/pkix-cert as content-type, but got %s", resp.Data["http_content_type"].(string))
+ }
+ return nil
+ },
+ },
+
+ // Test a bunch of generation stuff
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "root/generate/exported",
+ Data: map[string]interface{}{
+ "common_name": "Root Cert",
+ "ttl": "180h",
+ },
+ Check: func(resp *logical.Response) error {
+ intdata["root"] = resp.Data["certificate"].(string)
+ intdata["rootkey"] = resp.Data["private_key"].(string)
+ reqdata["pem_bundle"] = intdata["root"].(string) + "\n" + intdata["rootkey"].(string)
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "intermediate/generate/exported",
+ Data: map[string]interface{}{
+ "common_name": "Intermediate Cert",
+ },
+ Check: func(resp *logical.Response) error {
+ intdata["intermediatecsr"] = resp.Data["csr"].(string)
+ intdata["intermediatekey"] = resp.Data["private_key"].(string)
+ return nil
+ },
+ },
+
+ // Re-load the root key in so we can sign it
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ Data: reqdata,
+ Check: func(resp *logical.Response) error {
+ delete(reqdata, "pem_bundle")
+ delete(reqdata, "ttl")
+ reqdata["csr"] = intdata["intermediatecsr"].(string)
+ reqdata["common_name"] = "Intermediate Cert"
+ reqdata["ttl"] = "10s"
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "root/sign-intermediate",
+ Data: reqdata,
+ Check: func(resp *logical.Response) error {
+ delete(reqdata, "csr")
+ delete(reqdata, "common_name")
+ delete(reqdata, "ttl")
+ intdata["intermediatecert"] = resp.Data["certificate"].(string)
+ reqdata["serial_number"] = resp.Data["serial_number"].(string)
+ reqdata["rsa_int_serial_number"] = resp.Data["serial_number"].(string)
+ reqdata["certificate"] = resp.Data["certificate"].(string)
+ reqdata["pem_bundle"] = intdata["intermediatekey"].(string) + "\n" + resp.Data["certificate"].(string)
+ return nil
+ },
+ },
+
+ // First load in this way to populate the private key
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ Data: reqdata,
+ Check: func(resp *logical.Response) error {
+ delete(reqdata, "pem_bundle")
+ return nil
+ },
+ },
+
+ // Now test setting the intermediate, signed CA cert
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "intermediate/set-signed",
+ Data: reqdata,
+ Check: func(resp *logical.Response) error {
+ delete(reqdata, "certificate")
+
+ serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
+
+ return nil
+ },
+ },
+
+ // We expect to find a zero revocation time
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ PreFlight: setSerialUnderTest,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
+ return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
+ }
+
+ if resp.Data["revocation_time"].(int64) != 0 {
+ return fmt.Errorf("expected a zero revocation time")
+ }
+
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "revoke",
+ Data: reqdata,
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "crl",
+ Data: reqdata,
+ Check: func(resp *logical.Response) error {
+ crlBytes := resp.Data["http_raw_body"].([]byte)
+ certList, err := x509.ParseCRL(crlBytes)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ revokedList := certList.TBSCertList.RevokedCertificates
+ if len(revokedList) != 1 {
+ t.Fatalf("length of revoked list not 1; %d", len(revokedList))
+ }
+ revokedString := certutil.GetHexFormatted(revokedList[0].SerialNumber.Bytes(), ":")
+ if revokedString != reqdata["serial_number"].(string) {
+ t.Fatalf("got serial %s, expecting %s", revokedString, reqdata["serial_number"].(string))
+ }
+ delete(reqdata, "serial_number")
+ return nil
+ },
+ },
+
+ // Do it all again, with EC keys and DER format
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "root/generate/exported",
+ Data: map[string]interface{}{
+ "common_name": "Root Cert",
+ "ttl": "180h",
+ "key_type": "ec",
+ "key_bits": 384,
+ "format": "der",
+ },
+ Check: func(resp *logical.Response) error {
+ certBytes, _ := base64.StdEncoding.DecodeString(resp.Data["certificate"].(string))
+ certPem := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: certBytes,
+ })
+ keyBytes, _ := base64.StdEncoding.DecodeString(resp.Data["private_key"].(string))
+ keyPem := pem.EncodeToMemory(&pem.Block{
+ Type: "EC PRIVATE KEY",
+ Bytes: keyBytes,
+ })
+ intdata["root"] = string(certPem)
+ intdata["rootkey"] = string(keyPem)
+ reqdata["pem_bundle"] = string(certPem) + "\n" + string(keyPem)
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "intermediate/generate/exported",
+ Data: map[string]interface{}{
+ "format": "der",
+ "key_type": "ec",
+ "key_bits": 384,
+ "common_name": "Intermediate Cert",
+ },
+ Check: func(resp *logical.Response) error {
+ csrBytes, _ := base64.StdEncoding.DecodeString(resp.Data["csr"].(string))
+ csrPem := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE REQUEST",
+ Bytes: csrBytes,
+ })
+ keyBytes, _ := base64.StdEncoding.DecodeString(resp.Data["private_key"].(string))
+ keyPem := pem.EncodeToMemory(&pem.Block{
+ Type: "EC PRIVATE KEY",
+ Bytes: keyBytes,
+ })
+ intdata["intermediatecsr"] = string(csrPem)
+ intdata["intermediatekey"] = string(keyPem)
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ Data: reqdata,
+ Check: func(resp *logical.Response) error {
+ delete(reqdata, "pem_bundle")
+ delete(reqdata, "ttl")
+ reqdata["csr"] = intdata["intermediatecsr"].(string)
+ reqdata["common_name"] = "Intermediate Cert"
+ reqdata["ttl"] = "10s"
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "root/sign-intermediate",
+ Data: reqdata,
+ Check: func(resp *logical.Response) error {
+ delete(reqdata, "csr")
+ delete(reqdata, "common_name")
+ delete(reqdata, "ttl")
+ intdata["intermediatecert"] = resp.Data["certificate"].(string)
+ reqdata["serial_number"] = resp.Data["serial_number"].(string)
+ reqdata["ec_int_serial_number"] = resp.Data["serial_number"].(string)
+ reqdata["certificate"] = resp.Data["certificate"].(string)
+ reqdata["pem_bundle"] = intdata["intermediatekey"].(string) + "\n" + resp.Data["certificate"].(string)
+ return nil
+ },
+ },
+
+ // First load in this way to populate the private key
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ Data: reqdata,
+ Check: func(resp *logical.Response) error {
+ delete(reqdata, "pem_bundle")
+ return nil
+ },
+ },
+
+ // Now test setting the intermediate, signed CA cert
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "intermediate/set-signed",
+ Data: reqdata,
+ Check: func(resp *logical.Response) error {
+ delete(reqdata, "certificate")
+
+ serialUnderTest = "cert/" + reqdata["ec_int_serial_number"].(string)
+
+ return nil
+ },
+ },
+
+ // We expect to find a zero revocation time
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ PreFlight: setSerialUnderTest,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
+ return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
+ }
+
+ if resp.Data["revocation_time"].(int64) != 0 {
+ return fmt.Errorf("expected a zero revocation time")
+ }
+
+ return nil
+ },
+ },
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "revoke",
+ Data: reqdata,
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "crl",
+ Data: reqdata,
+ Check: func(resp *logical.Response) error {
+ crlBytes := resp.Data["http_raw_body"].([]byte)
+ certList, err := x509.ParseCRL(crlBytes)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ revokedList := certList.TBSCertList.RevokedCertificates
+ if len(revokedList) != 2 {
+ t.Fatalf("length of revoked list not 2; %d", len(revokedList))
+ }
+ found := false
+ for _, revEntry := range revokedList {
+ revokedString := certutil.GetHexFormatted(revEntry.SerialNumber.Bytes(), ":")
+ if revokedString == reqdata["serial_number"].(string) {
+ found = true
+ }
+ }
+ if !found {
+ t.Fatalf("did not find %s in CRL", reqdata["serial_number"].(string))
+ }
+ delete(reqdata, "serial_number")
+
+ serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
+
+ return nil
+ },
+ },
+
+ // Make sure both serial numbers we expect to find are found
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ PreFlight: setSerialUnderTest,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
+ return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
+ }
+
+ if resp.Data["revocation_time"].(int64) == 0 {
+ return fmt.Errorf("expected a non-zero revocation time")
+ }
+
+ serialUnderTest = "cert/" + reqdata["ec_int_serial_number"].(string)
+
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ PreFlight: setSerialUnderTest,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
+ return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
+ }
+
+ if resp.Data["revocation_time"].(int64) == 0 {
+ return fmt.Errorf("expected a non-zero revocation time")
+ }
+
+ // Give time for the certificates to pass the safety buffer
+ t.Logf("Sleeping for 15 seconds to allow safety buffer time to pass before testing tidying")
+ time.Sleep(15 * time.Second)
+
+ serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
+
+ return nil
+ },
+ },
+
+ // This shouldn't do anything since the safety buffer is too long
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "tidy",
+ Data: map[string]interface{}{
+ "safety_buffer": "3h",
+ "tidy_cert_store": true,
+ "tidy_revocation_list": true,
+ },
+ },
+
+ // We still expect to find these
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ PreFlight: setSerialUnderTest,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
+ return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
+ }
+
+ serialUnderTest = "cert/" + reqdata["ec_int_serial_number"].(string)
+
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ PreFlight: setSerialUnderTest,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
+ return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
+ }
+
+ serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
+
+ return nil
+ },
+ },
+
+ // Both should appear in the CRL
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "crl",
+ Data: reqdata,
+ Check: func(resp *logical.Response) error {
+ crlBytes := resp.Data["http_raw_body"].([]byte)
+ certList, err := x509.ParseCRL(crlBytes)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ revokedList := certList.TBSCertList.RevokedCertificates
+ if len(revokedList) != 2 {
+ t.Fatalf("length of revoked list not 2; %d", len(revokedList))
+ }
+ foundRsa := false
+ foundEc := false
+ for _, revEntry := range revokedList {
+ revokedString := certutil.GetHexFormatted(revEntry.SerialNumber.Bytes(), ":")
+ if revokedString == reqdata["rsa_int_serial_number"].(string) {
+ foundRsa = true
+ }
+ if revokedString == reqdata["ec_int_serial_number"].(string) {
+ foundEc = true
+ }
+ }
+ if !foundRsa || !foundEc {
+ t.Fatalf("did not find an expected entry in CRL")
+ }
+
+ return nil
+ },
+ },
+
+ // This shouldn't do anything since the boolean values default to false
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "tidy",
+ Data: map[string]interface{}{
+ "safety_buffer": "1s",
+ },
+ },
+
+ // We still expect to find these
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ PreFlight: setSerialUnderTest,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
+ return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
+ }
+
+ serialUnderTest = "cert/" + reqdata["ec_int_serial_number"].(string)
+
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ PreFlight: setSerialUnderTest,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
+ return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
+ }
+
+ serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
+
+ return nil
+ },
+ },
+
+ // This should remove the values since the safety buffer is short
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "tidy",
+ Data: map[string]interface{}{
+ "safety_buffer": "1s",
+ "tidy_cert_store": true,
+ "tidy_revocation_list": true,
+ },
+ },
+
+ // We do *not* expect to find these
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ PreFlight: setSerialUnderTest,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["error"] == nil || resp.Data["error"].(string) == "" {
+ return fmt.Errorf("didn't get an expected error")
+ }
+
+ serialUnderTest = "cert/" + reqdata["ec_int_serial_number"].(string)
+
+ return nil
+ },
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ PreFlight: setSerialUnderTest,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["error"] == nil || resp.Data["error"].(string) == "" {
+ return fmt.Errorf("didn't get an expected error")
+ }
+
+ serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
+
+ return nil
+ },
+ },
+
+ // Both should be gone from the CRL
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "crl",
+ Data: reqdata,
+ Check: func(resp *logical.Response) error {
+ crlBytes := resp.Data["http_raw_body"].([]byte)
+ certList, err := x509.ParseCRL(crlBytes)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ revokedList := certList.TBSCertList.RevokedCertificates
+ if len(revokedList) != 0 {
+ t.Fatalf("length of revoked list not 0; %d", len(revokedList))
+ }
+
+ return nil
+ },
+ },
+ }
+
+ return ret
+}
+
+// Generates steps to test out various role permutations
+func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep {
+ roleVals := roleEntry{
+ MaxTTL: "12h",
+ KeyType: "rsa",
+ KeyBits: 2048,
+ }
+ issueVals := certutil.IssueData{}
+ ret := []logicaltest.TestStep{}
+
+ roleTestStep := logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/test",
+ }
+ var issueTestStep logicaltest.TestStep
+ if useCSRs {
+ issueTestStep = logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "sign/test",
+ }
+ } else {
+ issueTestStep = logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "issue/test",
+ }
+ }
+
+ generatedRSAKeys := map[int]crypto.Signer{}
+ generatedECKeys := map[int]crypto.Signer{}
+
+ // For the number of tests being run, a seed of 1 has been tested
+ // to hit all of the various values below. However, for normal
+ // testing we use a randomized, time-based seed for maximum fuzziness.
+ var seed int64 = 1
+ fixedSeed := os.Getenv("VAULT_PKITESTS_FIXED_SEED")
+ if len(fixedSeed) == 0 {
+ seed = time.Now().UnixNano()
+ } else {
+ var err error
+ seed, err = strconv.ParseInt(fixedSeed, 10, 64)
+ if err != nil {
+ t.Fatalf("error parsing fixed seed of %s: %v", fixedSeed, err)
+ }
+ }
+ mathRand := mathrand.New(mathrand.NewSource(seed))
+ t.Logf("seed under test: %v", seed)
+
+ // Used by tests that do not toggle common names, to turn off the random key-bit fuzzing behavior
+ keybitSizeRandOff := false
+
+ genericErrorOkCheck := func(resp *logical.Response) error {
+ if resp.IsError() {
+ return nil
+ }
+ return fmt.Errorf("Expected an error, but did not seem to get one")
+ }
+
+ // Adds tests with the currently configured issue/role information
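+ // (Each TestStep is appended by value, so later mutations of
+ // roleTestStep/issueTestStep do not alter steps already in ret.)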
+ addTests := func(testCheck logicaltest.TestCheckFunc) {
+ stepCount++
+ //t.Logf("test step %d\nrole vals: %#v\n", stepCount, roleVals)
+ stepCount++
+ //t.Logf("test step %d\nissue vals: %#v\n", stepCount, issueTestStep)
+ roleTestStep.Data = structs.New(roleVals).Map()
+ roleTestStep.Data["generate_lease"] = false
+ ret = append(ret, roleTestStep)
+ issueTestStep.Data = structs.New(issueVals).Map()
+ switch {
+ case issueTestStep.ErrorOk:
+ issueTestStep.Check = genericErrorOkCheck
+ case testCheck != nil:
+ issueTestStep.Check = testCheck
+ default:
+ issueTestStep.Check = nil
+ }
+ ret = append(ret, issueTestStep)
+ }
+
+ getOuCheck := func(role roleEntry) logicaltest.TestCheckFunc {
+ var certBundle certutil.CertBundle
+ return func(resp *logical.Response) error {
+ err := mapstructure.Decode(resp.Data, &certBundle)
+ if err != nil {
+ return err
+ }
+ parsedCertBundle, err := certBundle.ToParsedCertBundle()
+ if err != nil {
+ return fmt.Errorf("Error checking generated certificate: %s", err)
+ }
+ cert := parsedCertBundle.Certificate
+
+ expected := strutil.ParseDedupLowercaseAndSortStrings(role.OU, ",")
+ if !reflect.DeepEqual(cert.Subject.OrganizationalUnit, expected) {
+ return fmt.Errorf("Error: returned certificate has OU of %s but %s was specified in the role.", cert.Subject.OrganizationalUnit, expected)
+ }
+ return nil
+ }
+ }
+
+ getOrganizationCheck := func(role roleEntry) logicaltest.TestCheckFunc {
+ var certBundle certutil.CertBundle
+ return func(resp *logical.Response) error {
+ err := mapstructure.Decode(resp.Data, &certBundle)
+ if err != nil {
+ return err
+ }
+ parsedCertBundle, err := certBundle.ToParsedCertBundle()
+ if err != nil {
+ return fmt.Errorf("Error checking generated certificate: %s", err)
+ }
+ cert := parsedCertBundle.Certificate
+
+ expected := strutil.ParseDedupLowercaseAndSortStrings(role.Organization, ",")
+ if !reflect.DeepEqual(cert.Subject.Organization, expected) {
+ return fmt.Errorf("Error: returned certificate has Organization of %s but %s was specified in the role.", cert.Subject.Organization, expected)
+ }
+ return nil
+ }
+ }
+
+ // Returns a TestCheckFunc that performs various validity checks on the
+ // returned certificate information, mostly within checkCertsAndPrivateKey
+ getCnCheck := func(name string, role roleEntry, key crypto.Signer, usage x509.KeyUsage, extUsage x509.ExtKeyUsage, validity time.Duration) logicaltest.TestCheckFunc {
+ var certBundle certutil.CertBundle
+ return func(resp *logical.Response) error {
+ err := mapstructure.Decode(resp.Data, &certBundle)
+ if err != nil {
+ return err
+ }
+ parsedCertBundle, err := checkCertsAndPrivateKey(role.KeyType, key, usage, extUsage, validity, &certBundle)
+ if err != nil {
+ return fmt.Errorf("Error checking generated certificate: %s", err)
+ }
+ cert := parsedCertBundle.Certificate
+ if cert.Subject.CommonName != name {
+ return fmt.Errorf("Error: returned certificate has CN of %s but %s was requested", cert.Subject.CommonName, name)
+ }
+ if strings.Contains(cert.Subject.CommonName, "@") {
+ if len(cert.DNSNames) != 0 || len(cert.EmailAddresses) != 1 {
+ return fmt.Errorf("Error: found more than one DNS SAN or not one Email SAN but only one was requested, cert.DNSNames = %#v, cert.EmailAddresses = %#v", cert.DNSNames, cert.EmailAddresses)
+ }
+ } else {
+ if len(cert.DNSNames) != 1 || len(cert.EmailAddresses) != 0 {
+ return fmt.Errorf("Error: found more than one Email SAN or not one DNS SAN but only one was requested, cert.DNSNames = %#v, cert.EmailAddresses = %#v", cert.DNSNames, cert.EmailAddresses)
+ }
+ }
+ var retName string
+ if len(cert.DNSNames) > 0 {
+ retName = cert.DNSNames[0]
+ }
+ if len(cert.EmailAddresses) > 0 {
+ retName = cert.EmailAddresses[0]
+ }
+ if retName != name {
+ return fmt.Errorf("Error: returned certificate has a DNS SAN of %s but %s was requested", retName, name)
+ }
+ return nil
+ }
+ }
+
+ // Common names to test with the various role flags toggled
+ var commonNames struct {
+ Localhost bool `structs:"localhost"`
+ BareDomain bool `structs:"example.com"`
+ SecondDomain bool `structs:"foobar.com"`
+ SubDomain bool `structs:"foo.example.com"`
+ Wildcard bool `structs:"*.example.com"`
+ SubSubdomain bool `structs:"foo.bar.example.com"`
+ SubSubdomainWildcard bool `structs:"*.bar.example.com"`
+ GlobDomain bool `structs:"fooexample.com"`
+ NonHostname bool `structs:"daɪˈɛrɨsɨs"`
+ AnyHost bool `structs:"porkslap.beer"`
+ }
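+
+ // Note: the structs tags above double as the common names under test;
+ // each bool records whether the current role configuration is expected
+ // to allow issuance for that name.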
+
+ // Adds a series of tests based on the current selection of
+ // allowed common names; contains some (seeded) randomness
+ //
+ // This allows for a variety of common names to be tested in various
+ // combinations with allowed toggles of the role
+ addCnTests := func() {
+ cnMap := structs.New(commonNames).Map()
+ for name, allowedInt := range cnMap {
+ roleVals.KeyType = "rsa"
+ roleVals.KeyBits = 2048
+ if mathRand.Int()%2 == 1 {
+ roleVals.KeyType = "ec"
+ roleVals.KeyBits = 224
+ }
+
+ roleVals.ServerFlag = false
+ roleVals.ClientFlag = false
+ roleVals.CodeSigningFlag = false
+ roleVals.EmailProtectionFlag = false
+
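+ // The deliberately mixed-case usage names below (e.g. "ContentCoMmitment")
+ // appear intended to also exercise case-insensitive key usage parsing.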
+ var usage string
+ if mathRand.Int()%2 == 1 {
+ usage = usage + ",DigitalSignature"
+ }
+ if mathRand.Int()%2 == 1 {
+ usage = usage + ",ContentCoMmitment"
+ }
+ if mathRand.Int()%2 == 1 {
+ usage = usage + ",KeyEncipherment"
+ }
+ if mathRand.Int()%2 == 1 {
+ usage = usage + ",DataEncipherment"
+ }
+ if mathRand.Int()%2 == 1 {
+ usage = usage + ",KeyAgreemEnt"
+ }
+ if mathRand.Int()%2 == 1 {
+ usage = usage + ",CertSign"
+ }
+ if mathRand.Int()%2 == 1 {
+ usage = usage + ",CRLSign"
+ }
+ if mathRand.Int()%2 == 1 {
+ usage = usage + ",EncipherOnly"
+ }
+ if mathRand.Int()%2 == 1 {
+ usage = usage + ",DecipherOnly"
+ }
+
+ roleVals.KeyUsage = usage
+ parsedKeyUsage := parseKeyUsages(roleVals.KeyUsage)
+ if parsedKeyUsage == 0 && usage != "" {
+ panic("parsed key usages was zero")
+ }
+ parsedKeyUsageUnderTest = parsedKeyUsage
+
+ var extUsage x509.ExtKeyUsage
+ i := mathRand.Int() % 4
+ switch {
+ case i == 0:
+ extUsage = x509.ExtKeyUsageEmailProtection
+ roleVals.EmailProtectionFlag = true
+ case i == 1:
+ extUsage = x509.ExtKeyUsageServerAuth
+ roleVals.ServerFlag = true
+ case i == 2:
+ extUsage = x509.ExtKeyUsageClientAuth
+ roleVals.ClientFlag = true
+ default:
+ extUsage = x509.ExtKeyUsageCodeSigning
+ roleVals.CodeSigningFlag = true
+ }
+
+ allowed := allowedInt.(bool)
+ issueVals.CommonName = name
+ if roleVals.EmailProtectionFlag {
+ if !strings.HasPrefix(name, "*") {
+ issueVals.CommonName = "user@" + issueVals.CommonName
+ }
+ }
+
+ issueTestStep.ErrorOk = !allowed
+
+ validity, _ := time.ParseDuration(roleVals.MaxTTL)
+
+ var testBitSize int
+
+ if useCSRs {
+ rsaKeyBits := []int{2048, 4096}
+ ecKeyBits := []int{224, 256, 384, 521}
+
+ var privKey crypto.Signer
+ var ok bool
+ switch roleVals.KeyType {
+ case "rsa":
+ roleVals.KeyBits = rsaKeyBits[mathRand.Int()%2]
+
+ // If we don't expect an error already, randomly choose a
+ // key size and expect an error if it's less than the role
+ // setting
+ testBitSize = roleVals.KeyBits
+ if !keybitSizeRandOff && !issueTestStep.ErrorOk {
+ testBitSize = rsaKeyBits[mathRand.Int()%2]
+ }
+
+ if testBitSize < roleVals.KeyBits {
+ issueTestStep.ErrorOk = true
+ }
+
+ privKey, ok = generatedRSAKeys[testBitSize]
+ if !ok {
+ privKey, _ = rsa.GenerateKey(rand.Reader, testBitSize)
+ generatedRSAKeys[testBitSize] = privKey
+ }
+
+ case "ec":
+ roleVals.KeyBits = ecKeyBits[mathRand.Int()%4]
+
+ var curve elliptic.Curve
+
+ // If we don't expect an error already, randomly choose a
+ // key size and expect an error if it's less than the role
+ // setting
+ testBitSize = roleVals.KeyBits
+ if !keybitSizeRandOff && !issueTestStep.ErrorOk {
+ testBitSize = ecKeyBits[mathRand.Int()%4]
+ }
+
+ switch testBitSize {
+ case 224:
+ curve = elliptic.P224()
+ case 256:
+ curve = elliptic.P256()
+ case 384:
+ curve = elliptic.P384()
+ case 521:
+ curve = elliptic.P521()
+ }
+
+ if curve.Params().BitSize < roleVals.KeyBits {
+ issueTestStep.ErrorOk = true
+ }
+
+ privKey, ok = generatedECKeys[testBitSize]
+ if !ok {
+ privKey, _ = ecdsa.GenerateKey(curve, rand.Reader)
+ generatedECKeys[testBitSize] = privKey
+ }
+ }
+ templ := &x509.CertificateRequest{
+ Subject: pkix.Name{
+ CommonName: issueVals.CommonName,
+ },
+ }
+ csr, err := x509.CreateCertificateRequest(rand.Reader, templ, privKey)
+ if err != nil {
+ t.Fatalf("Error creating certificate request: %s", err)
+ }
+ block := pem.Block{
+ Type: "CERTIFICATE REQUEST",
+ Bytes: csr,
+ }
+ issueVals.CSR = strings.TrimSpace(string(pem.EncodeToMemory(&block)))
+
+ addTests(getCnCheck(issueVals.CommonName, roleVals, privKey, x509.KeyUsage(parsedKeyUsage), extUsage, validity))
+ } else {
+ addTests(getCnCheck(issueVals.CommonName, roleVals, nil, x509.KeyUsage(parsedKeyUsage), extUsage, validity))
+ }
+ }
+ }
+
+ // Common Name tests
+ {
+ // common_name not provided
+ issueVals.CommonName = ""
+ issueTestStep.ErrorOk = true
+ addTests(nil)
+
+ // Nothing is allowed
+ addCnTests()
+
+ roleVals.AllowLocalhost = true
+ commonNames.Localhost = true
+ addCnTests()
+
+ roleVals.AllowedDomains = "foobar.com"
+ addCnTests()
+
+ roleVals.AllowedDomains = "example.com"
+ roleVals.AllowSubdomains = true
+ commonNames.SubDomain = true
+ commonNames.Wildcard = true
+ commonNames.SubSubdomain = true
+ commonNames.SubSubdomainWildcard = true
+ addCnTests()
+
+ roleVals.AllowedDomains = "foobar.com,example.com"
+ commonNames.SecondDomain = true
+ roleVals.AllowBareDomains = true
+ commonNames.BareDomain = true
+ addCnTests()
+
+ roleVals.AllowedDomains = "foobar.com,*example.com"
+ roleVals.AllowGlobDomains = true
+ commonNames.GlobDomain = true
+ addCnTests()
+
+ roleVals.AllowAnyName = true
+ roleVals.EnforceHostnames = true
+ commonNames.AnyHost = true
+ addCnTests()
+
+ roleVals.EnforceHostnames = false
+ commonNames.NonHostname = true
+ addCnTests()
+
+ // Ensure that we end up with acceptable key sizes since they won't be
+ // toggled any longer
+ keybitSizeRandOff = true
+ addCnTests()
+ }
+ // OU tests
+ {
+ roleVals.OU = "foo"
+ addTests(getOuCheck(roleVals))
+
+ roleVals.OU = "foo,bar"
+ addTests(getOuCheck(roleVals))
+ }
+ // Organization tests
+ {
+ roleVals.Organization = "system:masters"
+ addTests(getOrganizationCheck(roleVals))
+
+ roleVals.Organization = "foo,bar"
+ addTests(getOrganizationCheck(roleVals))
+ }
+ // IP SAN tests
+ {
+ roleVals.UseCSRSANs = true
+ roleVals.AllowIPSANs = false
+ issueTestStep.ErrorOk = false
+ addTests(nil)
+
+ roleVals.UseCSRSANs = false
+ issueVals.IPSANs = "127.0.0.1,::1"
+ issueTestStep.ErrorOk = true
+ addTests(nil)
+
+ roleVals.AllowIPSANs = true
+ issueTestStep.ErrorOk = false
+ addTests(nil)
+
+ issueVals.IPSANs = "foobar"
+ issueTestStep.ErrorOk = true
+ addTests(nil)
+
+ issueTestStep.ErrorOk = false
+ issueVals.IPSANs = ""
+ }
+
+ // Lease tests
+ {
+ roleTestStep.ErrorOk = true
+ roleVals.Lease = ""
+ roleVals.MaxTTL = ""
+ addTests(nil)
+
+ roleVals.Lease = "12h"
+ roleVals.MaxTTL = "6h"
+ addTests(nil)
+
+ roleTestStep.ErrorOk = false
+ }
+
+ // Listing test
+ ret = append(ret, logicaltest.TestStep{
+ Operation: logical.ListOperation,
+ Path: "roles/",
+ Check: func(resp *logical.Response) error {
+ if resp.Data == nil {
+ return fmt.Errorf("nil data")
+ }
+
+ keysRaw, ok := resp.Data["keys"]
+ if !ok {
+ return fmt.Errorf("no keys found")
+ }
+
+ keys, ok := keysRaw.([]string)
+ if !ok {
+ return fmt.Errorf("could not convert keys to a string list")
+ }
+
+ if len(keys) != 1 {
+ return fmt.Errorf("unexpected keys length of %d", len(keys))
+ }
+
+ if keys[0] != "test" {
+ return fmt.Errorf("unexpected key value of %s", keys[0])
+ }
+
+ return nil
+ },
+ })
+
+ return ret
+}
+
+func TestBackend_PathFetchCertList(t *testing.T) {
+ // create the backend
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b := Backend()
+ _, err := b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // generate root
+ rootData := map[string]interface{}{
+ "common_name": "test.com",
+ "ttl": "6h",
+ }
+
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "root/generate/internal",
+ Storage: storage,
+ Data: rootData,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to generate root, %#v", resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // config urls
+ urlsData := map[string]interface{}{
+ "issuing_certificates": "http://127.0.0.1:8200/v1/pki/ca",
+ "crl_distribution_points": "http://127.0.0.1:8200/v1/pki/crl",
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/urls",
+ Storage: storage,
+ Data: urlsData,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to config urls, %#v", resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // create a role entry
+ roleData := map[string]interface{}{
+ "allowed_domains": "test.com",
+ "allow_subdomains": "true",
+ "max_ttl": "4h",
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/test-example",
+ Storage: storage,
+ Data: roleData,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create a role, %#v", resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // issue some certs
+ for i := 1; i < 10; i++ {
+ certData := map[string]interface{}{
+ "common_name": "example.test.com",
+ }
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "issue/test-example",
+ Storage: storage,
+ Data: certData,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to issue a cert, %#v", resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // list certs
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ListOperation,
+ Path: "certs",
+ Storage: storage,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to list certs, %#v", resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ // check that the root and 9 additional certs are all listed
+ if len(resp.Data["keys"].([]string)) != 10 {
+ t.Fatalf("failed to list all 10 certs")
+ }
+
+ // list certs/
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ListOperation,
+ Path: "certs/",
+ Storage: storage,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to list certs, %#v", resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ // check that the root and 9 additional certs are all listed
+ if len(resp.Data["keys"].([]string)) != 10 {
+ t.Fatalf("failed to list all 10 certs")
+ }
+}
+
+func TestBackend_SignVerbatim(t *testing.T) {
+ // create the backend
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b := Backend()
+ _, err := b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // generate root
+ rootData := map[string]interface{}{
+ "common_name": "test.com",
+ "ttl": "172800",
+ }
+
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "root/generate/internal",
+ Storage: storage,
+ Data: rootData,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to generate root, %#v", *resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // create a CSR and key
+ key, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ csrReq := &x509.CertificateRequest{
+ Subject: pkix.Name{
+ CommonName: "foo.bar.com",
+ },
+ }
+ csr, err := x509.CreateCertificateRequest(rand.Reader, csrReq, key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(csr) == 0 {
+ t.Fatal("generated csr is empty")
+ }
+ pemCSR := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE REQUEST",
+ Bytes: csr,
+ })
+ if len(pemCSR) == 0 {
+ t.Fatal("pem csr is empty")
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sign-verbatim",
+ Storage: storage,
+ Data: map[string]interface{}{
+ "csr": string(pemCSR),
+ },
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to sign-verbatim basic CSR: %#v", *resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.Secret != nil {
+ t.Fatal("secret is not nil")
+ }
+
+ // create a role entry; we use this to check that sign-verbatim when used with a role is still honoring TTLs
+ roleData := map[string]interface{}{
+ "ttl": "4h",
+ "max_ttl": "8h",
+ }
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/test",
+ Storage: storage,
+ Data: roleData,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create a role, %#v", *resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sign-verbatim/test",
+ Storage: storage,
+ Data: map[string]interface{}{
+ "csr": string(pemCSR),
+ "ttl": "5h",
+ },
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to sign-verbatim ttl'd CSR: %#v", *resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.Secret != nil {
+ t.Fatal("got a lease when we should not have")
+ }
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sign-verbatim/test",
+ Storage: storage,
+ Data: map[string]interface{}{
+ "csr": string(pemCSR),
+ "ttl": "12h",
+ },
+ })
+ if resp != nil && !resp.IsError() {
+ t.Fatalf("sign-verbatim signed too-large-ttl'd CSR: %#v", *resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // now check that if we set generate-lease it takes it from the role and the TTLs match
+ roleData = map[string]interface{}{
+ "ttl": "4h",
+ "max_ttl": "8h",
+ "generate_lease": true,
+ }
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/test",
+ Storage: storage,
+ Data: roleData,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create a role, %#v", *resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sign-verbatim/test",
+ Storage: storage,
+ Data: map[string]interface{}{
+ "csr": string(pemCSR),
+ "ttl": "5h",
+ },
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to sign-verbatim role-leased CSR: %#v", *resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.Secret == nil {
+ t.Fatalf("secret is nil, response is %#v", *resp)
+ }
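+ // The assertion below only checks that the lease TTL is within 5h of
+ // the requested 5h value, i.e. anywhere in [0h, 10h].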
+ if math.Abs(float64(resp.Secret.TTL-(5*time.Hour))) > float64(5*time.Hour) {
+ t.Fatalf("ttl not default; wanted %v, got %v", b.System().DefaultLeaseTTL(), resp.Secret.TTL)
+ }
+}
+
+const (
+ rsaCAKey string = `-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAmPQlK7xD5p+E8iLQ8XlVmll5uU2NKMxKY3UF5tbh+0vkc+Fy
+XmutLxxXAyYRPoztZ1g7ocr8XBFYsQPK26TFc3TzrLL7bBEYHQArd8M+VUHjziB7
+zwwpbV7tG8WPqIScDKMNncavDcT8sDg3DUqb8/zWkBD8WEYmsVr1VfKY5pFdxIZU
+kHP3/MkDpGmfrED9K5qPu17dIHTL2VYi4KxKhtIryapZTk6vDwRNfIYJD23QbQnt
+Si1j0X9MTRUf3BIcd0Ch60aGvv0VSL+1NTafsZQD+z1RY/zNp9IUHz5bNIiePZ6l
+JrlddodAAXZ4sN1CMetf4bA2RXssxBEIb5FyiQIDAQABAoIBAGMScSk9DvZJCUIV
+zyU6JHqPzkp6sx5kBSMa37HAKiwt4lI1C3GhaVIEl0/Qzoannfa8rhOEeaXhDoPK
+IxHWTpcUf+mzHSvIfsf6Hi2655stzLLtU4SvKf5P6GF+vCi5jKKa0u0JjsXqfIpg
+Pzh6xT1q3kf+2JUNC28Brbv4IZXmPmqWwu21VN+t3GsMGYgOnEOzBjXMhvNnm9kN
+kznV9Y2y0UIcT4dhbe2VRs4Dp8dGEyrFM7/Ovb3hIJrTkPcxjBbL5eMqpXnIkiW2
+7NyPMWFvX2lGnGdZ1Erh65SVtMjnHFwnSJ8jD+x9RAH9c1LQrYASws3MvMV8Bdzg
+2iljNqECgYEAw3Ow0clLx2alj9qFXcS2ap1lUCJxXZ9UiIU5lOcPxpCpHPloua14
+46rj2EJ9SD1L2kyB5gCq4nGK5uUIx37AJryy1SGzUmtmIVxQLnm6XK6zKnTBk0gx
+gevS6D7fHLDiVGGl3oGw4evibUFCk7dFOb/I/uBRb1zyaJrqOIlDS7UCgYEAyFYi
+RYQbYJJ0k18fUWDKy/P/Rl7uy9D67Qa9+wxoYN2Kh/aQwnNxYHAbwG7Pupd0oGcW
+Yl4bgUliAX3IFGs/cCkPJAIHzwWBPjUDhsJ020TGxKfL4SWP9OaxOpN5TOAixvBY
+ar9aSaKEl7QShmzc/Dknxu58LcoZUwI82pKIGAUCgYAxaHJ/ZcpxOsKJjez+2jZe
+1zEAQ+SyjQ96f2sh+BMl1/XYLDhMD80qiE2WoqA2/b/KDGMd+Hc6TQeW/LjubV03
+raXreNxy7lFgB40BYqY4vbTu+5rfl3VkaW/kY9hU0WY1fIXIrLJBOjb/9WpWGxM1
+2QR/YcdURoPE67xf1FsdrQKBgE8KdNEakzah8e6nLBMOblTTutcH4410sVvNOi2P
+sqrtHZgRNwIRTB0xfjGJRtomoXQb2CANYyq6SjmuZ79upQPan0ekqXILiPeDMRX9
+KN/OHeI/FdiJ2mdUkX476zLih7YX47qSLsw4m7nC6UAyOWomHsSFGWdzglRW4K2X
+/KwFAoGAYQUEWhXp5vpKzAly1ivSH9+sGC59Cujdy50oJSjaw9J+W1fM5WO9z+MH
+CoEpRt8epIgvCBBP2IM7uJUu8i2jQgJ/rrn3NTJgZn2UEPzyxUxbuWnSyueyUsD6
+uhTwBDf8LWOpvdZHMI4CPZ5WJwxAGkvde9xtlzuZUSAlyI2X8m0=
+-----END RSA PRIVATE KEY-----
+`
+ rsaCACert string = `-----BEGIN CERTIFICATE-----
+MIIDljCCAn6gAwIBAgIUQVapfgyAeDH9rAmpw3PQrhMcjRMwDQYJKoZIhvcNAQEL
+BQAwMzExMC8GA1UEAxMoVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3ViIEF1
+dGhvcml0eTAeFw0xNjA4MDcyMjUzNTRaFw0yNjA3MjQxMDU0MjRaMDcxNTAzBgNV
+BAMTLFZhdWx0IFRlc3RpbmcgSW50ZXJtZWRpYXRlIFN1YiBTdWIgQXV0aG9yaXR5
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmPQlK7xD5p+E8iLQ8XlV
+mll5uU2NKMxKY3UF5tbh+0vkc+FyXmutLxxXAyYRPoztZ1g7ocr8XBFYsQPK26TF
+c3TzrLL7bBEYHQArd8M+VUHjziB7zwwpbV7tG8WPqIScDKMNncavDcT8sDg3DUqb
+8/zWkBD8WEYmsVr1VfKY5pFdxIZUkHP3/MkDpGmfrED9K5qPu17dIHTL2VYi4KxK
+htIryapZTk6vDwRNfIYJD23QbQntSi1j0X9MTRUf3BIcd0Ch60aGvv0VSL+1NTaf
+sZQD+z1RY/zNp9IUHz5bNIiePZ6lJrlddodAAXZ4sN1CMetf4bA2RXssxBEIb5Fy
+iQIDAQABo4GdMIGaMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
+A1UdDgQWBBRMeQTX9VkLqb1wzrvN/vFG09yhUTAfBgNVHSMEGDAWgBR0Oq2VTUBE
+dOm6a1sKJTvdZMV5LjA3BgNVHREEMDAugixWYXVsdCBUZXN0aW5nIEludGVybWVk
+aWF0ZSBTdWIgU3ViIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAQEAagYM7uFa
+tUziraBkuU7cIyX83y7lYFsDhUse2hkpqmgO14oEOwFsDox1Jg2QGt4FEfJoCOXf
+oCZZN8XmaWdSrfgs1nDmtE0xwXiX1z7JuJZ+Ygt3dcRHO1zs5tmuHLxrvMnKfIfG
+bsGmES4mknt0qQ7tGhpyC+KgEmcVL1QQJXNjzCrw5iQ9sgvQt+oCqV28pxOUSYkq
+FdrozmNdJwMgVADywiY/FqYJWgkixlFHQkPR7eiXwpahON+zRMk1JSgr/8N8fRDj
+aqVBRppPzVU9joUME0vOc8cK3VozNe4iRkKNZFelHU2NPPJSDjRLVH9tJ7jPVOEA
+/k6w2PwdoRom7Q==
+-----END CERTIFICATE-----
+`
+
+ rsaCAChain string = `-----BEGIN CERTIFICATE-----
+MIIDijCCAnKgAwIBAgIUOiGo/1EOhRhuupTRGDYnqdALk/swDQYJKoZIhvcNAQEL
+BQAwLzEtMCsGA1UEAxMkVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgQXV0aG9y
+aXR5MB4XDTE2MDgwNzIyNTA1MloXDTI2MDcyODE0NTEyMlowMzExMC8GA1UEAxMo
+VmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3ViIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMTPRQREwW3BEifNcm0XElMRB0GNTXHr
+XCuNoFVsVBlIEsNVQkka+SHZcmNBdEcZLBXP/W3tBT82B48GVN8jyxAGfYZ5hoOQ
+ed3GVft1A7lAnxcGvf5e9kfecKDcBB4G4rBhqdDNcAtklS2hV4uZUcVcEJKggpsQ
+a1wZkCn8eg6sqEYG/SxPouwL52PblxIN+Dd57sBeqx4qdL297XR8LuLkxqftwUCZ
+l2iFBnSDID/06ZmHDXA38I0n3jT2ZGjgPGFnIFKxRGq1vpVc3F5ga8qk+u66ybBu
+xWHzINQrrryjELbl2YBTr6i0R9HnZle6OPcXMWp0JuGjtDC1xb5NmnkCAwEAAaOB
+mTCBljAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+dDqtlU1ARHTpumtbCiU73WTFeS4wHwYDVR0jBBgwFoAU+UO/nrlKr4COZCxLZSY/
+ul+YMvMwMwYDVR0RBCwwKoIoVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3Vi
+IEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAQEAjgCuTXsLFkf0DVkfSsKReNwI
+U/yBcP8Ttbx/ltanJGIVfD5TZoCnNTWm6RkML29ohfxI27sHTUhj+/6Ba0MRiLeI
+FXdclXmHOU2dTHlrmUa0m/4cb5uYoiiEnpmyWL5k94fqPOZAvJcFHnP3db4vsaUW
+47YcOvJbPSJqFXZHadqnsf3Fur5NCeTkIk6yZSvwTaZJT0JIWcqfE5LK3mYAMMC3
+iPaIa1cYqOZhWx9ilQfW6u6WxWeOphGuDIusP7Q4qc2Dr9sekyD59dfIYsroK5TP
+QVJb69nIYINpYdg3l3VNmmkY4G30N9QNs6acaH49rYzLcRX6tLBgPklO6d+TPA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDejCCAmKgAwIBAgIUULdIdrdK4Y8d+XM9fuOpDlNcJIYwDQYJKoZIhvcNAQEL
+BQAwJzElMCMGA1UEAxMcVmF1bHQgVGVzdGluZyBSb290IEF1dGhvcml0eTAeFw0x
+NjA4MDcyMjUwNTFaFw0yNjA4MDExODUxMjFaMC8xLTArBgNVBAMTJFZhdWx0IFRl
+c3RpbmcgSW50ZXJtZWRpYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBANXa6U+MDiUrryeZeGxgkmAZdrm9wCKz/6SmxYSebKr8aZwD
+nfbsPLRFxU6BXp9Nc6pP7e8HLBv6PtFTQG389zxOBwAHxZQvUsFESumUd64oTLRG
+J+AErTh7rtSWbLZsgDtQVvpx+6mKkvm53f/aKcq+DbqAFOg6slYOaQix0ZvP/qL0
+iWGIPr1JZk9uBJOUuIUBJdbsgTk+KQqJL9M6up8bCnM0noCafwrNKwZWtsbkfOZE
+OLSycdzCEBeHejpHTIU0vgAkdj63oEy2AbK3hMPxKzNthL3DX6W0tssoVgL//92i
+oSfpDTxiXqqdr+J3accpsAvA+F+D2TqaxdAfjLcCAwEAAaOBlTCBkjAOBgNVHQ8B
+Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+UO/nrlKr4COZCxL
+ZSY/ul+YMvMwHwYDVR0jBBgwFoAUA3jY4OUWi1Y7zQgM7S9QeXjNgIQwLwYDVR0R
+BCgwJoIkVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgQXV0aG9yaXR5MA0GCSqG
+SIb3DQEBCwUAA4IBAQA9VJt92LsOOegtAx35rr41LSSfPWB2SCKg0fphL2gMPO5y
+fE2u8O5TF5dJWJ1fF3cg9/XK30Ohdl1/ujfHbVcX+O6Sb3cgwKTQQOhTdsAZKFRT
+RPHaf/Ja8uqAXMITApxOp7YiYQwukwZr+OsKi66s+zhlfI790PoQbC3UJvgmNDmv
+V5oP63mw4yhqRNPn4NOjzoC/hJSIM0AIdRB1nx2rsSUw0P354R1j9gO43L/Lj33S
+NEaPmw+SC3Tbcx4yxeKnTvGdu3sw/ndmZkCjaq5jxgTy9FONqT45TPJOyk29o5gl
++AVQz5fD2M3C1L/sZIPH2OQbXxePHcsvUZVgaKyk
+-----END CERTIFICATE-----
+`
+
+ ecCAKey string = `-----BEGIN EC PRIVATE KEY-----
+MIGkAgEBBDBP/t89wrC0RFVs0N+jiRuGPptoxI1Iyu42/PzzZWMKYnO7yCWFG/Qv
+zC8cRa8PDqegBwYFK4EEACKhZANiAAQI9e8n9RD6gOd5YpWpDi5AoPbskxQSogxx
+dYFzzHwS0RYIucmlcJ2CuJQNc+9E4dUCMsYr2cAnCgA4iUHzGaje3Fa4O667LVH1
+imAyAj5nbfSd89iNzg4XNPkFjuVNBlE=
+-----END EC PRIVATE KEY-----
+`
+
+ ecCACert string = `-----BEGIN CERTIFICATE-----
+MIIDHzCCAqSgAwIBAgIUEQ4L+8Xl9+/uxU3MMCrd3Bw0HMcwCgYIKoZIzj0EAwIw
+XzEjMCEGA1UEAxMaVmF1bHQgRUMgdGVzdGluZyByb290IGNlcnQxODA2BgNVBAUT
+Lzk3MzY2MDk3NDQ1ODU2MDI3MDY5MDQ0MTkxNjIxODI4NjI0NjM0NTI5MTkzMTU5
+MB4XDTE1MTAwNTE2MzAwMFoXDTM1MDkzMDE2MzAwMFowXzEjMCEGA1UEAxMaVmF1
+bHQgRUMgdGVzdGluZyByb290IGNlcnQxODA2BgNVBAUTLzk3MzY2MDk3NDQ1ODU2
+MDI3MDY5MDQ0MTkxNjIxODI4NjI0NjM0NTI5MTkzMTU5MHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAECPXvJ/UQ+oDneWKVqQ4uQKD27JMUEqIMcXWBc8x8EtEWCLnJpXCd
+griUDXPvROHVAjLGK9nAJwoAOIlB8xmo3txWuDuuuy1R9YpgMgI+Z230nfPYjc4O
+FzT5BY7lTQZRo4IBHzCCARswDgYDVR0PAQH/BAQDAgGuMBMGA1UdJQQMMAoGCCsG
+AQUFBwMJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFCIBqs15CiKuj7vqmIW5
+L07WSeLhMB8GA1UdIwQYMBaAFCIBqs15CiKuj7vqmIW5L07WSeLhMEIGCCsGAQUF
+BwEBBDYwNDAyBggrBgEFBQcwAoYmaHR0cDovL3ZhdWx0LmV4YW1wbGUuY29tL3Yx
+L3Jvb3Rwa2kvY2EwJQYDVR0RBB4wHIIaVmF1bHQgRUMgdGVzdGluZyByb290IGNl
+cnQwOAYDVR0fBDEwLzAtoCugKYYnaHR0cDovL3ZhdWx0LmV4YW1wbGUuY29tL3Yx
+L3Jvb3Rwa2kvY3JsMAoGCCqGSM49BAMCA2kAMGYCMQDRrxXskBtXjuZ1tUTk+qae
+3bNVE1oeTDJhe0m3KN7qTykSGslxfEjlv83GYXziiv0CMQDsqu1U9uXPn3ezSbgG
+O30prQ/sanDzNAeJhftoGtNPJDspwx0fzclHvKIhgl3JVUc=
+-----END CERTIFICATE-----
+`
+)
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go
new file mode 100644
index 0000000..cab5797
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go
@@ -0,0 +1,47 @@
+package pki
+
+import (
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func (b *backend) getGenerationParams(
+ data *framework.FieldData,
+) (exported bool, format string, role *roleEntry, errorResp *logical.Response) {
+ exportedStr := data.Get("exported").(string)
+ switch exportedStr {
+ case "exported":
+ exported = true
+ case "internal":
+ default:
+ errorResp = logical.ErrorResponse(
+ `The "exported" path parameter must be "internal" or "exported"`)
+ return
+ }
+
+ format = getFormat(data)
+ if format == "" {
+ errorResp = logical.ErrorResponse(
+ `The "format" path parameter must be "pem", "der", or "pem_bundle"`)
+ return
+ }
+
+ role = &roleEntry{
+ TTL: data.Get("ttl").(string),
+ KeyType: data.Get("key_type").(string),
+ KeyBits: data.Get("key_bits").(int),
+ AllowLocalhost: true,
+ AllowAnyName: true,
+ AllowIPSANs: true,
+ EnforceHostnames: false,
+ }
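+
+ // This synthetic role is deliberately permissive (any name, IP SANs, no
+ // hostname enforcement); CA generation paths only need the key
+ // type/length validation below.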
+
+ if role.KeyType == "rsa" && role.KeyBits < 2048 {
+ errorResp = logical.ErrorResponse("RSA keys < 2048 bits are unsafe and not supported")
+ return
+ }
+
+ errorResp = validateKeyTypeLength(role.KeyType, role.KeyBits)
+
+ return
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go
new file mode 100644
index 0000000..1796d98
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go
@@ -0,0 +1,1124 @@
+package pki
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/pem"
+ "fmt"
+ "net"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/ryanuber/go-glob"
+)
+
+type certExtKeyUsage int
+
+const (
+ serverExtKeyUsage certExtKeyUsage = 1 << iota
+ clientExtKeyUsage
+ codeSigningExtKeyUsage
+ emailProtectionExtKeyUsage
+)
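+
+// The constants above form a bitmask, so multiple extended key usages can
+// be OR'd together in a creationBundle's ExtKeyUsage field.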
+
+type creationBundle struct {
+ CommonName string
+ OU []string
+ Organization []string
+ DNSNames []string
+ EmailAddresses []string
+ IPAddresses []net.IP
+ IsCA bool
+ KeyType string
+ KeyBits int
+ SigningBundle *caInfoBundle
+ TTL time.Duration
+ KeyUsage x509.KeyUsage
+ ExtKeyUsage certExtKeyUsage
+
+ // Only used when signing a CA cert
+ UseCSRValues bool
+
+ // URLs to encode into the certificate
+ URLs *urlEntries
+
+ // The maximum path length to encode
+ MaxPathLength int
+}
+
+type caInfoBundle struct {
+ certutil.ParsedCertBundle
+ URLs *urlEntries
+}
+
+func (b *caInfoBundle) GetCAChain() []*certutil.CertBlock {
+ chain := []*certutil.CertBlock{}
+
+ // Include issuing CA in Chain, not including Root Authority
+ if (len(b.Certificate.AuthorityKeyId) > 0 &&
+ !bytes.Equal(b.Certificate.AuthorityKeyId, b.Certificate.SubjectKeyId)) ||
+ (len(b.Certificate.AuthorityKeyId) == 0 &&
+ !bytes.Equal(b.Certificate.RawIssuer, b.Certificate.RawSubject)) {
+
+ chain = append(chain, &certutil.CertBlock{
+ Certificate: b.Certificate,
+ Bytes: b.CertificateBytes,
+ })
+ if len(b.CAChain) > 0 {
+ chain = append(chain, b.CAChain...)
+ }
+ }
+
+ return chain
+}
+
+var (
+ hostnameRegex = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`)
+ oidExtensionBasicConstraints = []int{2, 5, 29, 19}
+)
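+
+// As an illustration, hostnameRegex accepts names such as "foo.example.com"
+// but rejects labels containing underscores or beginning/ending with a hyphen.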
+
+func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool {
+ for _, e := range extensions {
+ if e.Id.Equal(oid) {
+ return true
+ }
+ }
+ return false
+}
+
+func getFormat(data *framework.FieldData) string {
+ format := data.Get("format").(string)
+ switch format {
+ case "pem":
+ case "der":
+ case "pem_bundle":
+ default:
+ format = ""
+ }
+ return format
+}
+
+func validateKeyTypeLength(keyType string, keyBits int) *logical.Response {
+ switch keyType {
+ case "rsa":
+ switch keyBits {
+ case 2048:
+ case 4096:
+ case 8192:
+ default:
+ return logical.ErrorResponse(fmt.Sprintf(
+ "unsupported bit length for RSA key: %d", keyBits))
+ }
+ case "ec":
+ switch keyBits {
+ case 224:
+ case 256:
+ case 384:
+ case 521:
+ default:
+ return logical.ErrorResponse(fmt.Sprintf(
+ "unsupported bit length for EC key: %d", keyBits))
+ }
+ default:
+ return logical.ErrorResponse(fmt.Sprintf(
+ "unknown key type %s", keyType))
+ }
+
+ return nil
+}
+
+// Fetches the CA info. Unlike other certificates, the CA info is stored
+// in the backend as a CertBundle, because we are storing its private key
+func fetchCAInfo(req *logical.Request) (*caInfoBundle, error) {
+ bundleEntry, err := req.Storage.Get("config/ca_bundle")
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch local CA certificate/key: %v", err)}
+ }
+ if bundleEntry == nil {
+ return nil, errutil.UserError{Err: "backend must be configured with a CA certificate/key"}
+ }
+
+ var bundle certutil.CertBundle
+ if err := bundleEntry.DecodeJSON(&bundle); err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode local CA certificate/key: %v", err)}
+ }
+
+ parsedBundle, err := bundle.ToParsedCertBundle()
+ if err != nil {
+ return nil, errutil.InternalError{Err: err.Error()}
+ }
+
+ if parsedBundle.Certificate == nil {
+ return nil, errutil.InternalError{Err: "stored CA information not able to be parsed"}
+ }
+
+ caInfo := &caInfoBundle{*parsedBundle, nil}
+
+ entries, err := getURLs(req)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch URL information: %v", err)}
+ }
+ if entries == nil {
+ entries = &urlEntries{
+ IssuingCertificates: []string{},
+ CRLDistributionPoints: []string{},
+ OCSPServers: []string{},
+ }
+ }
+ caInfo.URLs = entries
+
+ return caInfo, nil
+}
+
+// Allows fetching certificates from the backend; it handles the slightly
+// separate pathing for CA, CRL, and revoked certificates.
+func fetchCertBySerial(req *logical.Request, prefix, serial string) (*logical.StorageEntry, error) {
+ var path, legacyPath string
+ var err error
+ var certEntry *logical.StorageEntry
+
+ hyphenSerial := normalizeSerial(serial)
+ colonSerial := strings.Replace(strings.ToLower(serial), "-", ":", -1)
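+
+ // For example, a serial of "17:67:16:b0:b9:45" is looked up (and stored)
+ // under the hyphen-separated form "17-67-16-b0-b9-45".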
+
+ switch {
+ // Revoked goes first as otherwise ca/crl get hardcoded paths which fail if
+ // we actually want revocation info
+ case strings.HasPrefix(prefix, "revoked/"):
+ legacyPath = "revoked/" + colonSerial
+ path = "revoked/" + hyphenSerial
+ case serial == "ca":
+ path = "ca"
+ case serial == "crl":
+ path = "crl"
+ default:
+ legacyPath = "certs/" + colonSerial
+ path = "certs/" + hyphenSerial
+ }
+
+ certEntry, err = req.Storage.Get(path)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching certificate %s: %s", serial, err)}
+ }
+ if certEntry != nil {
+ if len(certEntry.Value) == 0 {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("returned certificate bytes for serial %s were empty", serial)}
+ }
+ return certEntry, nil
+ }
+
+ // If legacyPath is unset, it's going to be a CA or CRL; return immediately
+ if legacyPath == "" {
+ return nil, nil
+ }
+
+ // Retrieve the old-style path
+ certEntry, err = req.Storage.Get(legacyPath)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching certificate %s: %s", serial, err)}
+ }
+ if certEntry == nil {
+ return nil, nil
+ }
+ if len(certEntry.Value) == 0 {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("returned certificate bytes for serial %s were empty", serial)}
+ }
+
+ // Update old-style paths to new-style paths
+ certEntry.Key = path
+ if err = req.Storage.Put(certEntry); err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("error saving certificate with serial %s to new location", serial)}
+ }
+ if err = req.Storage.Delete(legacyPath); err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("error deleting certificate with serial %s from old location", serial)}
+ }
+
+ return certEntry, nil
+}
+
+// Given a set of requested names for a certificate, verifies that all of them
+// match the various toggles set in the role for controlling issuance.
+// If one does not pass, it is returned in the string argument.
+func validateNames(req *logical.Request, names []string, role *roleEntry) string {
+ for _, name := range names {
+ sanitizedName := name
+ emailDomain := name
+ isEmail := false
+ isWildcard := false
+
+ // If it has an @, assume it is an email address and separate out the
+ // user from the hostname portion so that we can act on the hostname.
+ // Note that this matches behavior from the alt_names parameter. If it
+ // ends up being problematic for users, I guess that could be separated
+ // into dns_names and email_names in the future to be explicit, but I
+ // don't think this is likely.
+ if strings.Contains(name, "@") {
+ splitEmail := strings.Split(name, "@")
+ if len(splitEmail) != 2 {
+ return name
+ }
+ sanitizedName = splitEmail[1]
+ emailDomain = splitEmail[1]
+ isEmail = true
+ }
+
+ // If we have an asterisk as the first part of the domain name, mark it
+ // as wildcard and set the sanitized name to the remainder of the
+ // domain
+ if strings.HasPrefix(sanitizedName, "*.") {
+ sanitizedName = sanitizedName[2:]
+ isWildcard = true
+ }
+
+ // Email addresses using wildcard domain names do not make sense
+ if isEmail && isWildcard {
+ return name
+ }
+
+ // AllowAnyName is checked after this because EnforceHostnames still
+ // applies when allowing any name. Also, we check the sanitized name to
+ // ensure that we are not either checking a full email address or a
+ // wildcard prefix.
+ if role.EnforceHostnames {
+ if !hostnameRegex.MatchString(sanitizedName) {
+ return name
+ }
+ }
+
+ // Self-explanatory
+ if role.AllowAnyName {
+ continue
+ }
+
+ // The following blocks all work the same basic way:
+ // 1) If a role allows a certain class of base (localhost, token
+ // display name, role-configured domains), perform further tests
+ //
+ // 2) If there is a perfect match on either the name itself or, for an
+ // email address, on its hostname portion, allow it
+ //
+ // 3) If subdomains are allowed, we check based on the sanitized name;
+ // note that if it is not a wildcard, this will be equivalent to the
+ // email domain for email checks, and we already checked above for both
+ // a wildcard and an email address being present in the same name
+ // 3a) First we check for a non-wildcard subdomain, as in <name>.<base>
+ // 3b) Then we check if it's a wildcard and the base domain is a match
+ //
+ // Variances are noted in-line
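+ //
+ // As a worked example: with AllowedDomains="example.com" and
+ // AllowSubdomains=true, both "foo.example.com" and "*.example.com" are
+ // accepted, while "example.com" itself additionally requires
+ // AllowBareDomains.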
+
+ if role.AllowLocalhost {
+ if name == "localhost" ||
+ name == "localdomain" ||
+ (isEmail && emailDomain == "localhost") ||
+ (isEmail && emailDomain == "localdomain") {
+ continue
+ }
+
+ if role.AllowSubdomains {
+ // It is possible, if unlikely, to have a subdomain of "localhost"
+ if strings.HasSuffix(sanitizedName, ".localhost") ||
+ (isWildcard && sanitizedName == "localhost") {
+ continue
+ }
+
+ // A subdomain of "localdomain" is also not entirely uncommon
+ if strings.HasSuffix(sanitizedName, ".localdomain") ||
+ (isWildcard && sanitizedName == "localdomain") {
+ continue
+ }
+ }
+ }
+
+ if role.AllowTokenDisplayName {
+ if name == req.DisplayName {
+ continue
+ }
+
+ if role.AllowSubdomains {
+ if isEmail {
+ // If it's an email address, we need to parse the token
+ // display name in order to do a proper comparison of the
+ // subdomain
+ if strings.Contains(req.DisplayName, "@") {
+ splitDisplay := strings.Split(req.DisplayName, "@")
+ if len(splitDisplay) == 2 {
+ // Compare the sanitized name against the hostname
+ // portion of the email address in the token
+ // display name
+ if strings.HasSuffix(sanitizedName, "."+splitDisplay[1]) {
+ continue
+ }
+ }
+ }
+ }
+
+ if strings.HasSuffix(sanitizedName, "."+req.DisplayName) ||
+ (isWildcard && sanitizedName == req.DisplayName) {
+ continue
+ }
+ }
+ }
+
+ if role.AllowedDomains != "" {
+ valid := false
+ for _, currDomain := range strings.Split(role.AllowedDomains, ",") {
+ // If there is, say, a trailing comma, ignore it
+ if currDomain == "" {
+ continue
+ }
+
+ // First, allow an exact match of the base domain if that role flag
+ // is enabled
+ if role.AllowBareDomains &&
+ (name == currDomain ||
+ (isEmail && emailDomain == currDomain)) {
+ valid = true
+ break
+ }
+
+ if role.AllowSubdomains {
+ if strings.HasSuffix(sanitizedName, "."+currDomain) ||
+ (isWildcard && sanitizedName == currDomain) {
+ valid = true
+ break
+ }
+ }
+
+ if role.AllowGlobDomains &&
+ strings.Contains(currDomain, "*") &&
+ glob.Glob(currDomain, name) {
+ valid = true
+ break
+ }
+ }
+ if valid {
+ continue
+ }
+ }
+
+ //panic(fmt.Sprintf("\nName is %s\nRole is\n%#v\n", name, role))
+ return name
+ }
+
+ return ""
+}
+
+func generateCert(b *backend,
+ role *roleEntry,
+ signingBundle *caInfoBundle,
+ isCA bool,
+ req *logical.Request,
+ data *framework.FieldData) (*certutil.ParsedCertBundle, error) {
+
+ if role.KeyType == "rsa" && role.KeyBits < 2048 {
+ return nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"}
+ }
+
+ creationBundle, err := generateCreationBundle(b, role, signingBundle, nil, req, data)
+ if err != nil {
+ return nil, err
+ }
+
+ if isCA {
+ creationBundle.IsCA = isCA
+
+ if signingBundle == nil {
+ // Generating a self-signed root certificate
+ entries, err := getURLs(req)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch URL information: %v", err)}
+ }
+ if entries == nil {
+ entries = &urlEntries{
+ IssuingCertificates: []string{},
+ CRLDistributionPoints: []string{},
+ OCSPServers: []string{},
+ }
+ }
+ creationBundle.URLs = entries
+
+ if role.MaxPathLength == nil {
+ creationBundle.MaxPathLength = -1
+ } else {
+ creationBundle.MaxPathLength = *role.MaxPathLength
+ }
+ }
+ }
+
+ parsedBundle, err := createCertificate(creationBundle)
+ if err != nil {
+ return nil, err
+ }
+
+ return parsedBundle, nil
+}
+
+// N.B.: This is only meant to be used for generating intermediate CAs.
+// It skips some sanity checks.
+func generateIntermediateCSR(b *backend,
+ role *roleEntry,
+ signingBundle *caInfoBundle,
+ req *logical.Request,
+ data *framework.FieldData) (*certutil.ParsedCSRBundle, error) {
+
+ creationBundle, err := generateCreationBundle(b, role, signingBundle, nil, req, data)
+ if err != nil {
+ return nil, err
+ }
+
+ parsedBundle, err := createCSR(creationBundle)
+ if err != nil {
+ return nil, err
+ }
+
+ return parsedBundle, nil
+}
+
+func signCert(b *backend,
+ role *roleEntry,
+ signingBundle *caInfoBundle,
+ isCA bool,
+ useCSRValues bool,
+ req *logical.Request,
+ data *framework.FieldData) (*certutil.ParsedCertBundle, error) {
+
+ csrString := data.Get("csr").(string)
+ if csrString == "" {
+ return nil, errutil.UserError{Err: "\"csr\" is empty"}
+ }
+
+ pemBytes := []byte(csrString)
+ pemBlock, pemBytes := pem.Decode(pemBytes)
+ if pemBlock == nil {
+ return nil, errutil.UserError{Err: "csr contains no data"}
+ }
+ csr, err := x509.ParseCertificateRequest(pemBlock.Bytes)
+ if err != nil {
+ return nil, errutil.UserError{Err: fmt.Sprintf("certificate request could not be parsed: %v", err)}
+ }
+
+ switch role.KeyType {
+ case "rsa":
+ // Verify that the key matches the role type
+ if csr.PublicKeyAlgorithm != x509.RSA {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "role requires keys of type %s",
+ role.KeyType)}
+ }
+ pubKey, ok := csr.PublicKey.(*rsa.PublicKey)
+ if !ok {
+ return nil, errutil.UserError{Err: "could not parse CSR's public key"}
+ }
+
+ // Verify that the key is at least 2048 bits
+ if pubKey.N.BitLen() < 2048 {
+ return nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"}
+ }
+
+ // Verify that the bit size is at least the size specified in the role
+ if pubKey.N.BitLen() < role.KeyBits {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "role requires a minimum of a %d-bit key, but CSR's key is %d bits",
+ role.KeyBits,
+ pubKey.N.BitLen())}
+ }
+
+ case "ec":
+ // Verify that the key matches the role type
+ if csr.PublicKeyAlgorithm != x509.ECDSA {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "role requires keys of type %s",
+ role.KeyType)}
+ }
+ pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, errutil.UserError{Err: "could not parse CSR's public key"}
+ }
+
+ // Verify that the bit size is at least the size specified in the role
+ if pubKey.Params().BitSize < role.KeyBits {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "role requires a minimum of a %d-bit key, but CSR's key is %d bits",
+ role.KeyBits,
+ pubKey.Params().BitSize)}
+ }
+
+ case "any":
+ // We only care about running RSA < 2048 bit checks, so if not RSA
+ // break out
+ if csr.PublicKeyAlgorithm != x509.RSA {
+ break
+ }
+
+ // Run RSA < 2048 bit checks
+ pubKey, ok := csr.PublicKey.(*rsa.PublicKey)
+ if !ok {
+ return nil, errutil.UserError{Err: "could not parse CSR's public key"}
+ }
+ if pubKey.N.BitLen() < 2048 {
+ return nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"}
+ }
+
+ }
+
+ creationBundle, err := generateCreationBundle(b, role, signingBundle, csr, req, data)
+ if err != nil {
+ return nil, err
+ }
+
+ creationBundle.IsCA = isCA
+ creationBundle.UseCSRValues = useCSRValues
+
+ parsedBundle, err := signCertificate(creationBundle, csr)
+ if err != nil {
+ return nil, err
+ }
+
+ return parsedBundle, nil
+}
+
+// generateCreationBundle is a shared function that reads parameters supplied
+// from the various endpoints and generates a creationBundle with the
+// parameters that can be used to issue or sign
+func generateCreationBundle(b *backend,
+ role *roleEntry,
+ signingBundle *caInfoBundle,
+ csr *x509.CertificateRequest,
+ req *logical.Request,
+ data *framework.FieldData) (*creationBundle, error) {
+ var err error
+ var ok bool
+
+ // Read in names -- CN, DNS and email addresses
+ var cn string
+ dnsNames := []string{}
+ emailAddresses := []string{}
+ {
+ if csr != nil && role.UseCSRCommonName {
+ cn = csr.Subject.CommonName
+ }
+ if cn == "" {
+ cn = data.Get("common_name").(string)
+ if cn == "" {
+ return nil, errutil.UserError{Err: `the common_name field is required, or must be provided in a CSR with "use_csr_common_name" set to true`}
+ }
+ }
+
+ if csr != nil && role.UseCSRSANs {
+ dnsNames = csr.DNSNames
+ emailAddresses = csr.EmailAddresses
+ }
+
+ if !data.Get("exclude_cn_from_sans").(bool) {
+ if strings.Contains(cn, "@") {
+ // Note: emails are not disallowed if the role's email protection
+ // flag is false, because they may well be included for
+ // informational purposes; it is up to the verifying party to
+ // ensure that email addresses in a subject alternate name can be
+ // used for the purpose for which they are presented
+ emailAddresses = append(emailAddresses, cn)
+ } else {
+ dnsNames = append(dnsNames, cn)
+ }
+ }
+
+ if csr == nil || !role.UseCSRSANs {
+ cnAltRaw, ok := data.GetOk("alt_names")
+ if ok {
+ cnAlt := strutil.ParseDedupLowercaseAndSortStrings(cnAltRaw.(string), ",")
+ for _, v := range cnAlt {
+ if strings.Contains(v, "@") {
+ emailAddresses = append(emailAddresses, v)
+ } else {
+ dnsNames = append(dnsNames, v)
+ }
+ }
+ }
+ }
+
+ // Check the CN. This ensures that the CN is checked even if it's
+ // excluded from SANs.
+ badName := validateNames(req, []string{cn}, role)
+ if len(badName) != 0 {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "common name %s not allowed by this role", badName)}
+ }
+
+ // Check for bad email and/or DNS names
+ badName = validateNames(req, dnsNames, role)
+ if len(badName) != 0 {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "subject alternate name %s not allowed by this role", badName)}
+ }
+
+ badName = validateNames(req, emailAddresses, role)
+ if len(badName) != 0 {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "email address %s not allowed by this role", badName)}
+ }
+ }
+
+ // Get and verify any IP SANs
+ ipAddresses := []net.IP{}
+ var ipAltInt interface{}
+ {
+ if csr != nil && role.UseCSRSANs {
+ if len(csr.IPAddresses) > 0 {
+ if !role.AllowIPSANs {
+ return nil, errutil.UserError{Err: "IP Subject Alternative Names are not allowed in this role, but some were provided via CSR"}
+ }
+ ipAddresses = csr.IPAddresses
+ }
+ } else {
+ ipAltInt, ok = data.GetOk("ip_sans")
+ if ok {
+ ipAlt := ipAltInt.(string)
+ if len(ipAlt) != 0 {
+ if !role.AllowIPSANs {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "IP Subject Alternative Names are not allowed in this role, but was provided %s", ipAlt)}
+ }
+ for _, v := range strings.Split(ipAlt, ",") {
+ parsedIP := net.ParseIP(v)
+ if parsedIP == nil {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "the value '%s' is not a valid IP address", v)}
+ }
+ ipAddresses = append(ipAddresses, parsedIP)
+ }
+ }
+ }
+ }
+ }
+
+ // Set OU (organizationalUnit) values if specified in the role
+ ou := []string{}
+ {
+ if role.OU != "" {
+ ou = strutil.RemoveDuplicates(strutil.ParseStringSlice(role.OU, ","), false)
+ }
+ }
+
+ // Set O (organization) values if specified in the role
+ organization := []string{}
+ {
+ if role.Organization != "" {
+ organization = strutil.RemoveDuplicates(strutil.ParseStringSlice(role.Organization, ","), false)
+ }
+ }
+
+ // Get the TTL and verify it against the max allowed
+ var ttlField string
+ var ttl time.Duration
+ var maxTTL time.Duration
+ var ttlFieldInt interface{}
+ {
+ ttlFieldInt, ok = data.GetOk("ttl")
+ if !ok {
+ ttlField = role.TTL
+ } else {
+ ttlField = ttlFieldInt.(string)
+ }
+
+ if len(ttlField) == 0 {
+ ttl = b.System().DefaultLeaseTTL()
+ } else {
+ ttl, err = parseutil.ParseDurationSecond(ttlField)
+ if err != nil {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "invalid requested ttl: %s", err)}
+ }
+ }
+
+ if len(role.MaxTTL) == 0 {
+ maxTTL = b.System().MaxLeaseTTL()
+ } else {
+ maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL)
+ if err != nil {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "invalid ttl: %s", err)}
+ }
+ }
+
+ if ttl > maxTTL {
+ // Don't error if they were using system defaults; only error if
+ // they specifically chose a bad TTL
+ if len(ttlField) == 0 {
+ ttl = maxTTL
+ } else {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "ttl is larger than maximum allowed (%d)", maxTTL/time.Second)}
+ }
+ }
+
+ // If it's not self-signed, verify that the issued certificate won't be
+ // valid past the lifetime of the CA certificate
+ if signingBundle != nil &&
+ time.Now().Add(ttl).After(signingBundle.Certificate.NotAfter) {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "cannot satisfy request, as TTL is beyond the expiration of the CA certificate")}
+ }
+ }
+
+ // Build up usages
+ var extUsage certExtKeyUsage
+ {
+ if role.ServerFlag {
+ extUsage = extUsage | serverExtKeyUsage
+ }
+ if role.ClientFlag {
+ extUsage = extUsage | clientExtKeyUsage
+ }
+ if role.CodeSigningFlag {
+ extUsage = extUsage | codeSigningExtKeyUsage
+ }
+ if role.EmailProtectionFlag {
+ extUsage = extUsage | emailProtectionExtKeyUsage
+ }
+ }
+
+ creationBundle := &creationBundle{
+ CommonName: cn,
+ OU: ou,
+ Organization: organization,
+ DNSNames: dnsNames,
+ EmailAddresses: emailAddresses,
+ IPAddresses: ipAddresses,
+ KeyType: role.KeyType,
+ KeyBits: role.KeyBits,
+ SigningBundle: signingBundle,
+ TTL: ttl,
+ KeyUsage: x509.KeyUsage(parseKeyUsages(role.KeyUsage)),
+ ExtKeyUsage: extUsage,
+ }
+
+ // Don't deal with URLs or max path length if it's self-signed, as these
+ // normally come from the signing bundle
+ if signingBundle == nil {
+ return creationBundle, nil
+ }
+
+ // This will have been read in from the getURLs function
+ creationBundle.URLs = signingBundle.URLs
+
+ // If the max path length in the role is not nil, it was specified at
+ // generation time with the max_path_length parameter; otherwise derive it
+ // from the signing certificate
+ if role.MaxPathLength != nil {
+ creationBundle.MaxPathLength = *role.MaxPathLength
+ } else {
+ switch {
+ case signingBundle.Certificate.MaxPathLen < 0:
+ creationBundle.MaxPathLength = -1
+ case signingBundle.Certificate.MaxPathLen == 0 &&
+ signingBundle.Certificate.MaxPathLenZero:
+ // The signing function will ensure that we do not issue a CA cert
+ creationBundle.MaxPathLength = 0
+ default:
+ // If this takes it to zero, we handle this case later if
+ // necessary
+ creationBundle.MaxPathLength = signingBundle.Certificate.MaxPathLen - 1
+ }
+ }
+
+ return creationBundle, nil
+}
+
+// addKeyUsages adds appropriate key usages to the template given the creation
+// information
+func addKeyUsages(creationInfo *creationBundle, certTemplate *x509.Certificate) {
+ if creationInfo.IsCA {
+ certTemplate.KeyUsage = x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign)
+ return
+ }
+
+ certTemplate.KeyUsage = creationInfo.KeyUsage
+
+ if creationInfo.ExtKeyUsage&serverExtKeyUsage != 0 {
+ certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageServerAuth)
+ }
+ if creationInfo.ExtKeyUsage&clientExtKeyUsage != 0 {
+ certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageClientAuth)
+ }
+ if creationInfo.ExtKeyUsage&codeSigningExtKeyUsage != 0 {
+ certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageCodeSigning)
+ }
+ if creationInfo.ExtKeyUsage&emailProtectionExtKeyUsage != 0 {
+ certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageEmailProtection)
+ }
+}
+
+// Performs the heavy lifting of creating a certificate. Returns
+// a fully-filled-in ParsedCertBundle.
+func createCertificate(creationInfo *creationBundle) (*certutil.ParsedCertBundle, error) {
+ var err error
+ result := &certutil.ParsedCertBundle{}
+
+ serialNumber, err := certutil.GenerateSerialNumber()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := certutil.GeneratePrivateKey(creationInfo.KeyType,
+ creationInfo.KeyBits,
+ result); err != nil {
+ return nil, err
+ }
+
+ subjKeyID, err := certutil.GetSubjKeyID(result.PrivateKey)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("error getting subject key ID: %s", err)}
+ }
+
+ subject := pkix.Name{
+ CommonName: creationInfo.CommonName,
+ OrganizationalUnit: creationInfo.OU,
+ Organization: creationInfo.Organization,
+ }
+
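+ // NotBefore is backdated slightly to tolerate clock skew between the
+ // issuing host and clients validating the certificate.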
+ certTemplate := &x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: subject,
+ NotBefore: time.Now().Add(-30 * time.Second),
+ NotAfter: time.Now().Add(creationInfo.TTL),
+ IsCA: false,
+ SubjectKeyId: subjKeyID,
+ DNSNames: creationInfo.DNSNames,
+ EmailAddresses: creationInfo.EmailAddresses,
+ IPAddresses: creationInfo.IPAddresses,
+ }
+
+ // Mark the template as a CA if there is no signing bundle (self-signed root)
+ if creationInfo.SigningBundle == nil {
+ certTemplate.IsCA = true
+ }
+
+ addKeyUsages(creationInfo, certTemplate)
+
+ certTemplate.IssuingCertificateURL = creationInfo.URLs.IssuingCertificates
+ certTemplate.CRLDistributionPoints = creationInfo.URLs.CRLDistributionPoints
+ certTemplate.OCSPServer = creationInfo.URLs.OCSPServers
+
+ var certBytes []byte
+ if creationInfo.SigningBundle != nil {
+ switch creationInfo.SigningBundle.PrivateKeyType {
+ case certutil.RSAPrivateKey:
+ certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
+ case certutil.ECPrivateKey:
+ certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
+ }
+
+ caCert := creationInfo.SigningBundle.Certificate
+
+ certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, result.PrivateKey.Public(), creationInfo.SigningBundle.PrivateKey)
+ } else {
+ // Creating a self-signed root
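+ // Note: Go's x509 package distinguishes an explicit pathlen:0
+ // (MaxPathLen of 0 with MaxPathLenZero set) from an unset path length
+ // (MaxPathLen of 0 with MaxPathLenZero unset).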
+ if creationInfo.MaxPathLength == 0 {
+ certTemplate.MaxPathLen = 0
+ certTemplate.MaxPathLenZero = true
+ } else {
+ certTemplate.MaxPathLen = creationInfo.MaxPathLength
+ }
+
+ switch creationInfo.KeyType {
+ case "rsa":
+ certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
+ case "ec":
+ certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
+ }
+
+ certTemplate.BasicConstraintsValid = true
+ certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, result.PrivateKey.Public(), result.PrivateKey)
+ }
+
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)}
+ }
+
+ result.CertificateBytes = certBytes
+ result.Certificate, err = x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)}
+ }
+
+ if creationInfo.SigningBundle != nil {
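+ // Include the signing certificate in the returned chain only when it
+ // is not self-signed, i.e. its authority key ID differs from its
+ // subject key ID; a self-signed root does not belong in the chain.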
+ if len(creationInfo.SigningBundle.Certificate.AuthorityKeyId) > 0 &&
+ !bytes.Equal(creationInfo.SigningBundle.Certificate.AuthorityKeyId, creationInfo.SigningBundle.Certificate.SubjectKeyId) {
+
+ result.CAChain = []*certutil.CertBlock{
+ &certutil.CertBlock{
+ Certificate: creationInfo.SigningBundle.Certificate,
+ Bytes: creationInfo.SigningBundle.CertificateBytes,
+ },
+ }
+ result.CAChain = append(result.CAChain, creationInfo.SigningBundle.CAChain...)
+ }
+ }
+
+ return result, nil
+}
+
+// Creates a CSR. This is currently only meant for use when
+// generating an intermediate certificate.
+func createCSR(creationInfo *creationBundle) (*certutil.ParsedCSRBundle, error) {
+ var err error
+ result := &certutil.ParsedCSRBundle{}
+
+ if err := certutil.GeneratePrivateKey(creationInfo.KeyType,
+ creationInfo.KeyBits,
+ result); err != nil {
+ return nil, err
+ }
+
+ // As with many root CAs, only the common name is used; other subject information is ignored
+ subject := pkix.Name{
+ CommonName: creationInfo.CommonName,
+ }
+
+ csrTemplate := &x509.CertificateRequest{
+ Subject: subject,
+ DNSNames: creationInfo.DNSNames,
+ EmailAddresses: creationInfo.EmailAddresses,
+ IPAddresses: creationInfo.IPAddresses,
+ }
+
+ switch creationInfo.KeyType {
+ case "rsa":
+ csrTemplate.SignatureAlgorithm = x509.SHA256WithRSA
+ case "ec":
+ csrTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
+ }
+
+ csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, result.PrivateKey)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)}
+ }
+
+ result.CSRBytes = csr
+ result.CSR, err = x509.ParseCertificateRequest(csr)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %v", err)}
+ }
+
+ return result, nil
+}
+
+// Performs the heavy lifting of generating a certificate from a CSR.
+// Returns a ParsedCertBundle sans private keys.
+func signCertificate(creationInfo *creationBundle,
+ csr *x509.CertificateRequest) (*certutil.ParsedCertBundle, error) {
+ switch {
+ case creationInfo == nil:
+ return nil, errutil.UserError{Err: "nil creation info given to signCertificate"}
+ case creationInfo.SigningBundle == nil:
+ return nil, errutil.UserError{Err: "nil signing bundle given to signCertificate"}
+ case csr == nil:
+ return nil, errutil.UserError{Err: "nil csr given to signCertificate"}
+ }
+
+ err := csr.CheckSignature()
+ if err != nil {
+ return nil, errutil.UserError{Err: "request signature invalid"}
+ }
+
+ result := &certutil.ParsedCertBundle{}
+
+ serialNumber, err := certutil.GenerateSerialNumber()
+ if err != nil {
+ return nil, err
+ }
+
+ marshaledKey, err := x509.MarshalPKIXPublicKey(csr.PublicKey)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)}
+ }
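+ // Derive the subject key ID from a SHA-1 hash of the DER-encoded
+ // public key.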
+ subjKeyID := sha1.Sum(marshaledKey)
+
+ subject := pkix.Name{
+ CommonName: creationInfo.CommonName,
+ OrganizationalUnit: creationInfo.OU,
+ Organization: creationInfo.Organization,
+ }
+
+ certTemplate := &x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: subject,
+ NotBefore: time.Now().Add(-30 * time.Second),
+ NotAfter: time.Now().Add(creationInfo.TTL),
+ SubjectKeyId: subjKeyID[:],
+ }
+
+ switch creationInfo.SigningBundle.PrivateKeyType {
+ case certutil.RSAPrivateKey:
+ certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
+ case certutil.ECPrivateKey:
+ certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
+ }
+
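+ // If requested, take the subject, SANs, and extensions directly from
+ // the CSR; otherwise use the role-vetted values gathered in
+ // generateCreationBundle.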
+ if creationInfo.UseCSRValues {
+ certTemplate.Subject = csr.Subject
+
+ certTemplate.DNSNames = csr.DNSNames
+ certTemplate.EmailAddresses = csr.EmailAddresses
+ certTemplate.IPAddresses = csr.IPAddresses
+
+ certTemplate.ExtraExtensions = csr.Extensions
+ } else {
+ certTemplate.DNSNames = creationInfo.DNSNames
+ certTemplate.EmailAddresses = creationInfo.EmailAddresses
+ certTemplate.IPAddresses = creationInfo.IPAddresses
+ }
+
+ addKeyUsages(creationInfo, certTemplate)
+
+ var certBytes []byte
+ caCert := creationInfo.SigningBundle.Certificate
+
+ certTemplate.IssuingCertificateURL = creationInfo.URLs.IssuingCertificates
+ certTemplate.CRLDistributionPoints = creationInfo.URLs.CRLDistributionPoints
+ certTemplate.OCSPServer = creationInfo.SigningBundle.URLs.OCSPServers
+
+ if creationInfo.IsCA {
+ certTemplate.BasicConstraintsValid = true
+ certTemplate.IsCA = true
+
+ if creationInfo.SigningBundle.Certificate.MaxPathLen == 0 &&
+ creationInfo.SigningBundle.Certificate.MaxPathLenZero {
+ return nil, errutil.UserError{Err: "signing certificate has a max path length of zero, and cannot issue further CA certificates"}
+ }
+
+ certTemplate.MaxPathLen = creationInfo.MaxPathLength
+ if certTemplate.MaxPathLen == 0 {
+ certTemplate.MaxPathLenZero = true
+ }
+ }
+
+ certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, csr.PublicKey, creationInfo.SigningBundle.PrivateKey)
+
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)}
+ }
+
+ result.CertificateBytes = certBytes
+ result.Certificate, err = x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)}
+ }
+
+ result.CAChain = creationInfo.SigningBundle.GetCAChain()
+
+ return result, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util_test.go
new file mode 100644
index 0000000..068a0a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util_test.go
@@ -0,0 +1,123 @@
+package pki
+
+import (
+ "fmt"
+ "testing"
+
+ "strings"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestPki_FetchCertBySerial(t *testing.T) {
+ storage := &logical.InmemStorage{}
+
+ cases := map[string]struct {
+ Req *logical.Request
+ Prefix string
+ Serial string
+ }{
+ "valid cert": {
+ &logical.Request{
+ Storage: storage,
+ },
+ "certs/",
+ "00:00:00:00:00:00:00:00",
+ },
+ "revoked cert": {
+ &logical.Request{
+ Storage: storage,
+ },
+ "revoked/",
+ "11:11:11:11:11:11:11:11",
+ },
+ }
+
+ // Test for colon-based paths in storage
+ for name, tc := range cases {
+ storageKey := fmt.Sprintf("%s%s", tc.Prefix, tc.Serial)
+ err := storage.Put(&logical.StorageEntry{
+ Key: storageKey,
+ Value: []byte("some data"),
+ })
+ if err != nil {
+ t.Fatalf("error writing to storage on %s colon-based storage path: %s", name, err)
+ }
+
+ certEntry, err := fetchCertBySerial(tc.Req, tc.Prefix, tc.Serial)
+ if err != nil {
+ t.Fatalf("error on %s for colon-based storage path: %s", name, err)
+ }
+
+ // Check for non-nil on valid/revoked certs
+ if certEntry == nil {
+ t.Fatalf("nil on %s for colon-based storage path", name)
+ }
+
+ // Ensure that cert serials are converted/updated after fetch
+ expectedKey := tc.Prefix + normalizeSerial(tc.Serial)
+ se, err := storage.Get(expectedKey)
+ if err != nil {
+ t.Fatalf("error on %s for colon-based storage path:%s", name, err)
+ }
+ if strings.Compare(expectedKey, se.Key) != 0 {
+ t.Fatalf("expected: %s, got: %s", expectedKey, certEntry.Key)
+ }
+ }
+
+ // Reset storage
+ storage = &logical.InmemStorage{}
+
+ // Test for hyphen-based paths in storage
+ for name, tc := range cases {
+ storageKey := tc.Prefix + normalizeSerial(tc.Serial)
+ err := storage.Put(&logical.StorageEntry{
+ Key: storageKey,
+ Value: []byte("some data"),
+ })
+ if err != nil {
+ t.Fatalf("error writing to storage on %s hyphen-based storage path: %s", name, err)
+ }
+
+ certEntry, err := fetchCertBySerial(tc.Req, tc.Prefix, tc.Serial)
+ if err != nil || certEntry == nil {
+ t.Fatalf("error on %s for hyphen-based storage path: err: %v, entry: %v", name, err, certEntry)
+ }
+ }
+
+ noConvCases := map[string]struct {
+ Req *logical.Request
+ Prefix string
+ Serial string
+ }{
+ "ca": {
+ &logical.Request{
+ Storage: storage,
+ },
+ "",
+ "ca",
+ },
+ "crl": {
+ &logical.Request{
+ Storage: storage,
+ },
+ "",
+ "crl",
+ },
+ }
+
+ // Test for ca and crl case
+ for name, tc := range noConvCases {
+ err := storage.Put(&logical.StorageEntry{
+ Key: tc.Serial,
+ Value: []byte("some data"),
+ })
+ if err != nil {
+ t.Fatalf("error writing to storage on %s: %s", name, err)
+ }
+
+ certEntry, err := fetchCertBySerial(tc.Req, tc.Prefix, tc.Serial)
+ if err != nil || certEntry == nil {
+ t.Fatalf("error on %s: err: %v, entry: %v", name, err, certEntry)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/crl_util.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/crl_util.go
new file mode 100644
index 0000000..c40e759
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/crl_util.go
@@ -0,0 +1,208 @@
+package pki
+
+import (
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+type revocationInfo struct {
+ CertificateBytes []byte `json:"certificate_bytes"`
+ RevocationTime int64 `json:"revocation_time"`
+ RevocationTimeUTC time.Time `json:"revocation_time_utc"`
+}
+
+// Revokes a cert, and tries to be smart about error recovery
+func revokeCert(b *backend, req *logical.Request, serial string, fromLease bool) (*logical.Response, error) {
+ // As this backend is self-contained and this function does not hook into
+ // third parties to manage users or resources, if the mount is tainted,
+ // revocation doesn't matter anyway -- the CRL that would be written will
+ // be immediately blown away by the view being cleared. So we can simply
+ // fast-path a successful exit.
+ if b.System().Tainted() {
+ return nil, nil
+ }
+
+ alreadyRevoked := false
+ var revInfo revocationInfo
+
+ revEntry, err := fetchCertBySerial(req, "revoked/", serial)
+ if err != nil {
+ switch err.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(err.Error()), nil
+ case errutil.InternalError:
+ return nil, err
+ }
+ }
+ if revEntry != nil {
+ // Set the revocation info to the existing values
+ alreadyRevoked = true
+ err = revEntry.DecodeJSON(&revInfo)
+ if err != nil {
+ return nil, fmt.Errorf("Error decoding existing revocation info")
+ }
+ }
+
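+ // Not already revoked: fetch the certificate, record the revocation
+ // time, and store the entry under the revoked/ prefix.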
+ if !alreadyRevoked {
+ certEntry, err := fetchCertBySerial(req, "certs/", serial)
+ if err != nil {
+ switch err.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(err.Error()), nil
+ case errutil.InternalError:
+ return nil, err
+ }
+ }
+ if certEntry == nil {
+ return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found", serial)), nil
+ }
+
+ cert, err := x509.ParseCertificate(certEntry.Value)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing certificate: %s", err)
+ }
+ if cert == nil {
+ return nil, fmt.Errorf("Got a nil certificate")
+ }
+
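+ // If the certificate has already expired, there is nothing useful to
+ // revoke; return success without touching storage or the CRL.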
+ if cert.NotAfter.Before(time.Now()) {
+ return nil, nil
+ }
+
+ // Compatibility: Don't revoke CAs if they had leases. New CAs going
+ // forward aren't issued leases.
+ if cert.IsCA && fromLease {
+ return nil, nil
+ }
+
+ currTime := time.Now()
+ revInfo.CertificateBytes = certEntry.Value
+ revInfo.RevocationTime = currTime.Unix()
+ revInfo.RevocationTimeUTC = currTime.UTC()
+
+ revEntry, err = logical.StorageEntryJSON("revoked/"+normalizeSerial(serial), revInfo)
+ if err != nil {
+ return nil, fmt.Errorf("Error creating revocation entry")
+ }
+
+ err = req.Storage.Put(revEntry)
+ if err != nil {
+ return nil, fmt.Errorf("Error saving revoked certificate to new location")
+ }
+
+ }
+
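+ // Rebuild the CRL so the revocation takes effect immediately.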
+ crlErr := buildCRL(b, req)
+ switch crlErr.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(fmt.Sprintf("Error during CRL building: %s", crlErr)), nil
+ case errutil.InternalError:
+ return nil, fmt.Errorf("Error encountered during CRL building: %s", crlErr)
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "revocation_time": revInfo.RevocationTime,
+ },
+ }
+ if !revInfo.RevocationTimeUTC.IsZero() {
+ resp.Data["revocation_time_rfc3339"] = revInfo.RevocationTimeUTC.Format(time.RFC3339Nano)
+ }
+ return resp, nil
+}
+
+// Builds a CRL by going through the list of revoked certificates and building
+// a new CRL with the stored revocation times and serial numbers.
+func buildCRL(b *backend, req *logical.Request) error {
+ revokedSerials, err := req.Storage.List("revoked/")
+ if err != nil {
+ return errutil.InternalError{Err: fmt.Sprintf("Error fetching list of revoked certs: %s", err)}
+ }
+
+ revokedCerts := []pkix.RevokedCertificate{}
+ var revInfo revocationInfo
+ for _, serial := range revokedSerials {
+ revokedEntry, err := req.Storage.Get("revoked/" + serial)
+ if err != nil {
+ return errutil.InternalError{Err: fmt.Sprintf("Unable to fetch revoked cert with serial %s: %s", serial, err)}
+ }
+ if revokedEntry == nil {
+ return errutil.InternalError{Err: fmt.Sprintf("Revoked certificate entry for serial %s is nil", serial)}
+ }
+ if len(revokedEntry.Value) == 0 {
+ // TODO: In this case, remove it and continue? How likely is this to
+ // happen? Alternately, could skip it entirely, or could implement a
+ // delete function so that there is a way to remove these
+ return errutil.InternalError{Err: fmt.Sprintf("Found revoked serial but actual certificate is empty")}
+ }
+
+ err = revokedEntry.DecodeJSON(&revInfo)
+ if err != nil {
+ return errutil.InternalError{Err: fmt.Sprintf("Error decoding revocation entry for serial %s: %s", serial, err)}
+ }
+
+ revokedCert, err := x509.ParseCertificate(revInfo.CertificateBytes)
+ if err != nil {
+ return errutil.InternalError{Err: fmt.Sprintf("Unable to parse stored revoked certificate with serial %s: %s", serial, err)}
+ }
+
+ // NOTE: We have to change this to UTC time because the CRL standard
+ // mandates it, but Go will happily encode the CRL without the conversion.
+ newRevCert := pkix.RevokedCertificate{
+ SerialNumber: revokedCert.SerialNumber,
+ }
+ if !revInfo.RevocationTimeUTC.IsZero() {
+ newRevCert.RevocationTime = revInfo.RevocationTimeUTC
+ } else {
+ newRevCert.RevocationTime = time.Unix(revInfo.RevocationTime, 0).UTC()
+ }
+ revokedCerts = append(revokedCerts, newRevCert)
+ }
+
+ signingBundle, caErr := fetchCAInfo(req)
+ switch caErr.(type) {
+ case errutil.UserError:
+ return errutil.UserError{Err: fmt.Sprintf("Could not fetch the CA certificate: %s", caErr)}
+ case errutil.InternalError:
+ return errutil.InternalError{Err: fmt.Sprintf("Error fetching CA certificate: %s", caErr)}
+ }
+
+ crlLifetime := b.crlLifetime
+ crlInfo, err := b.CRL(req.Storage)
+ if err != nil {
+ return errutil.InternalError{Err: fmt.Sprintf("Error fetching CRL config information: %s", err)}
+ }
+ if crlInfo != nil {
+ crlDur, err := time.ParseDuration(crlInfo.Expiry)
+ if err != nil {
+ return errutil.InternalError{Err: fmt.Sprintf("Error parsing CRL duration of %s", crlInfo.Expiry)}
+ }
+ crlLifetime = crlDur
+ }
+
+ crlBytes, err := signingBundle.Certificate.CreateCRL(rand.Reader, signingBundle.PrivateKey, revokedCerts, time.Now(), time.Now().Add(crlLifetime))
+ if err != nil {
+ return errutil.InternalError{Err: fmt.Sprintf("Error creating new CRL: %s", err)}
+ }
+
+ err = req.Storage.Put(&logical.StorageEntry{
+ Key: "crl",
+ Value: crlBytes,
+ })
+ if err != nil {
+ return errutil.InternalError{Err: fmt.Sprintf("Error storing CRL: %s", err)}
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go
new file mode 100644
index 0000000..e97a970
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go
@@ -0,0 +1,148 @@
+package pki
+
+import "github.com/hashicorp/vault/logical/framework"
+
+// addIssueAndSignCommonFields adds fields common to both CA and non-CA issuing
+// and signing
+func addIssueAndSignCommonFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+ fields["exclude_cn_from_sans"] = &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: `If true, the Common Name will not be
+included in DNS or Email Subject Alternative Names.
+Defaults to false (CN is included).`,
+ }
+
+ fields["format"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "pem",
+ Description: `Format for returned data. Can be "pem", "der",
+or "pem_bundle". If "pem_bundle" any private
+key and issuing cert will be appended to the
+certificate pem. Defaults to "pem".`,
+ }
+
+ fields["ip_sans"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The requested IP SANs, if any, in a
+comma-delimited list`,
+ }
+
+ return fields
+}
+
+// addNonCACommonFields adds fields with help text specific to non-CA
+// certificate issuing and signing
+func addNonCACommonFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+ fields = addIssueAndSignCommonFields(fields)
+
+ fields["role"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The desired role with configuration for this
+request`,
+ }
+
+ fields["common_name"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The requested common name; if you want more than
+one, specify the alternative names in the
+alt_names map. If email protection is enabled
+in the role, this may be an email address.`,
+ }
+
+ fields["alt_names"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The requested Subject Alternative Names, if any,
+in a comma-delimited list. If email protection
+is enabled for the role, this may contain
+email addresses.`,
+ }
+
+ fields["ttl"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The requested Time To Live for the certificate;
+sets the expiration date. If not specified
+the role default, backend default, or system
+default TTL is used, in that order. Cannot
+be larger than the role max TTL.
+ }
+
+ return fields
+}
+
+// addCACommonFields adds fields with help text specific to CA
+// certificate issuing and signing
+func addCACommonFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+ fields = addIssueAndSignCommonFields(fields)
+
+ fields["alt_names"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The requested Subject Alternative Names, if any,
+in a comma-delimited list. May contain both
+DNS names and email addresses.`,
+ }
+
+ fields["common_name"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The requested common name; if you want more than
+one, specify the alternative names in the alt_names
+map. If not specified when signing, the common
+name will be taken from the CSR; other names
+must still be specified in alt_names or ip_sans.`,
+ }
+
+ fields["ttl"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The requested Time To Live for the certificate;
+sets the expiration date. If not specified
+the role default, backend default, or system
+default TTL is used, in that order. Cannot
+be larger than the mount max TTL. Note:
+this only has an effect when generating
+a CA cert or signing a CA cert, not when
+generating a CSR for an intermediate CA.`,
+ }
+
+ return fields
+}
+
+// addCAKeyGenerationFields adds fields with help text specific to CA key
+// generation and exporting
+func addCAKeyGenerationFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+ fields["exported"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Must be "internal" or "exported". If set to
+"exported", the generated private key will be
+returned. This is your *only* chance to retrieve
+the private key!`,
+ }
+
+ fields["key_bits"] = &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Default: 2048,
+ Description: `The number of bits to use. You will almost
+certainly want to change this if you adjust
+the key_type.`,
+ }
+
+ fields["key_type"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "rsa",
+ Description: `The type of key to use; defaults to RSA. "rsa"
+and "ec" are the only valid values.`,
+ }
+
+ return fields
+}
+
+// addCAIssueFields adds fields common to CA issuing, e.g. when returning
+// an actual certificate
+func addCAIssueFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+ fields["max_path_length"] = &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Default: -1,
+ Description: "The maximum allowable path length",
+ }
+
+ return fields
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go
new file mode 100644
index 0000000..c182553
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go
@@ -0,0 +1,133 @@
+package pki
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigCA(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/ca",
+ Fields: map[string]*framework.FieldSchema{
+ "pem_bundle": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `PEM-format, concatenated unencrypted
+secret key and certificate, or, if a
+CSR was generated with the "generate"
+endpoint, just the signed certificate.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathCAWrite,
+ },
+
+ HelpSynopsis: pathConfigCAHelpSyn,
+ HelpDescription: pathConfigCAHelpDesc,
+ }
+}
+
+func (b *backend) pathCAWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ pemBundle := data.Get("pem_bundle").(string)
+
+ parsedBundle, err := certutil.ParsePEMBundle(pemBundle)
+ if err != nil {
+ switch err.(type) {
+ case errutil.InternalError:
+ return nil, err
+ default:
+ return logical.ErrorResponse(err.Error()), nil
+ }
+ }
+
+ if parsedBundle.PrivateKey == nil ||
+ parsedBundle.PrivateKeyType == certutil.UnknownPrivateKey {
+ return logical.ErrorResponse("private key not found in the PEM bundle"), nil
+ }
+
+ if parsedBundle.Certificate == nil {
+ return logical.ErrorResponse("no certificate found in the PEM bundle"), nil
+ }
+
+ if !parsedBundle.Certificate.IsCA {
+ return logical.ErrorResponse("the given certificate is not marked for CA use and cannot be used with this backend"), nil
+ }
+
+ cb, err := parsedBundle.ToCertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("error converting raw values into cert bundle: %s", err)
+ }
+
+ entry, err := logical.StorageEntryJSON("config/ca_bundle", cb)
+ if err != nil {
+ return nil, err
+ }
+ err = req.Storage.Put(entry)
+ if err != nil {
+ return nil, err
+ }
+
+ // For ease of later use, also store just the certificate at a known
+ // location, plus a fresh CRL
+ entry.Key = "ca"
+ entry.Value = parsedBundle.CertificateBytes
+ err = req.Storage.Put(entry)
+ if err != nil {
+ return nil, err
+ }
+
+ err = buildCRL(b, req)
+
+ return nil, err
+}
+
+const pathConfigCAHelpSyn = `
+Set the CA certificate and private key used for generated credentials.
+`
+
+const pathConfigCAHelpDesc = `
+This sets the CA information used for credentials generated by this
+mount. This must be a PEM-format, concatenated unencrypted
+secret key and certificate.
+
+For security reasons, the secret key cannot be retrieved later.
+`
+
+const pathConfigCAGenerateHelpSyn = `
+Generate a new CA certificate and private key used for signing.
+`
+
+const pathConfigCAGenerateHelpDesc = `
+This path generates a CA certificate and private key to be used for
+credentials generated by this mount. The path can either
+end in "internal" or "exported"; this controls whether the
+unencrypted private key is exported after generation. This will
+be your only chance to export the private key; for security reasons
+it cannot be read or exported later.
+
+If the "type" option is set to "self-signed", the generated
+certificate will be a self-signed root CA. Otherwise, this mount
+will act as an intermediate CA; a CSR will be returned, to be signed
+by your chosen CA (which could be another mount of this backend).
+Note that the CRL path will be set to this mount's CRL path; if you
+need further customization it is recommended that you create a CSR
+separately and get it signed. Either way, use the "config/ca/set"
+endpoint to load the signed certificate into Vault.
+`
+
+const pathConfigCASignHelpSyn = `
+Generate a signed CA certificate from a CSR.
+`
+
+const pathConfigCASignHelpDesc = `
+This path generates a CA certificate to be used for credentials
+generated by the certificate's destination mount.
+
+Use the "config/ca/set" endpoint to load the signed certificate
+into Vault another Vault mount.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_crl.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_crl.go
new file mode 100644
index 0000000..36db15d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_crl.go
@@ -0,0 +1,103 @@
+package pki
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// CRLConfig holds basic CRL configuration information
+type crlConfig struct {
+ Expiry string `json:"expiry" mapstructure:"expiry" structs:"expiry"`
+}
+
+func pathConfigCRL(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/crl",
+ Fields: map[string]*framework.FieldSchema{
+ "expiry": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The amount of time the generated CRL should be
+valid; defaults to 72 hours`,
+ Default: "72h",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathCRLRead,
+ logical.UpdateOperation: b.pathCRLWrite,
+ },
+
+ HelpSynopsis: pathConfigCRLHelpSyn,
+ HelpDescription: pathConfigCRLHelpDesc,
+ }
+}
+
+func (b *backend) CRL(s logical.Storage) (*crlConfig, error) {
+ entry, err := s.Get("config/crl")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result crlConfig
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathCRLRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ config, err := b.CRL(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if config == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "expiry": config.Expiry,
+ },
+ }, nil
+}
+
+func (b *backend) pathCRLWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ expiry := d.Get("expiry").(string)
+
+ _, err := time.ParseDuration(expiry)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Given expiry could not be decoded: %s", err)), nil
+ }
+
+ config := &crlConfig{
+ Expiry: expiry,
+ }
+
+ entry, err := logical.StorageEntryJSON("config/crl", config)
+ if err != nil {
+ return nil, err
+ }
+ err = req.Storage.Put(entry)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+const pathConfigCRLHelpSyn = `
+Configure the CRL expiration.
+`
+
+const pathConfigCRLHelpDesc = `
+This endpoint allows configuration of the CRL lifetime.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_urls.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_urls.go
new file mode 100644
index 0000000..42ce1ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_urls.go
@@ -0,0 +1,167 @@
+package pki
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/asaskevich/govalidator"
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigURLs(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/urls",
+ Fields: map[string]*framework.FieldSchema{
+ "issuing_certificates": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Comma-separated list of URLs to be used
+for the issuing certificate attribute`,
+ },
+
+ "crl_distribution_points": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Comma-separated list of URLs to be used
+for the CRL distribution points attribute`,
+ },
+
+ "ocsp_servers": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Comma-separated list of URLs to be used
+for the OCSP servers attribute`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathWriteURL,
+ logical.ReadOperation: b.pathReadURL,
+ },
+
+ HelpSynopsis: pathConfigURLsHelpSyn,
+ HelpDescription: pathConfigURLsHelpDesc,
+ }
+}
+
+func validateURLs(urls []string) string {
+ for _, curr := range urls {
+ if !govalidator.IsURL(curr) {
+ return curr
+ }
+ }
+
+ return ""
+}
+
+func getURLs(req *logical.Request) (*urlEntries, error) {
+ entry, err := req.Storage.Get("urls")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var entries urlEntries
+ if err := entry.DecodeJSON(&entries); err != nil {
+ return nil, err
+ }
+
+ return &entries, nil
+}
+
+func writeURLs(req *logical.Request, entries *urlEntries) error {
+ entry, err := logical.StorageEntryJSON("urls", entries)
+ if err != nil {
+ return err
+ }
+ if entry == nil {
+ return fmt.Errorf("Unable to marshal entry into JSON")
+ }
+
+ err = req.Storage.Put(entry)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (b *backend) pathReadURL(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ entries, err := getURLs(req)
+ if err != nil {
+ return nil, err
+ }
+ if entries == nil {
+ return nil, nil
+ }
+
+ resp := &logical.Response{
+ Data: structs.New(entries).Map(),
+ }
+
+ return resp, nil
+}
+
+func (b *backend) pathWriteURL(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ entries, err := getURLs(req)
+ if err != nil {
+ return nil, err
+ }
+ if entries == nil {
+ entries = &urlEntries{
+ IssuingCertificates: []string{},
+ CRLDistributionPoints: []string{},
+ OCSPServers: []string{},
+ }
+ }
+
+ if urlsInt, ok := data.GetOk("issuing_certificates"); ok {
+ splitURLs := strings.Split(urlsInt.(string), ",")
+ entries.IssuingCertificates = splitURLs
+ if badURL := validateURLs(entries.IssuingCertificates); badURL != "" {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "invalid URL found in issuing certificates: %s", badURL)), nil
+ }
+ }
+ if urlsInt, ok := data.GetOk("crl_distribution_points"); ok {
+ splitURLs := strings.Split(urlsInt.(string), ",")
+ entries.CRLDistributionPoints = splitURLs
+ if badURL := validateURLs(entries.CRLDistributionPoints); badURL != "" {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "invalid URL found in CRL distribution points: %s", badURL)), nil
+ }
+ }
+ if urlsInt, ok := data.GetOk("ocsp_servers"); ok {
+ splitURLs := strings.Split(urlsInt.(string), ",")
+ entries.OCSPServers = splitURLs
+ if badURL := validateURLs(entries.OCSPServers); badURL != "" {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "invalid URL found in OCSP servers: %s", badURL)), nil
+ }
+ }
+
+ return nil, writeURLs(req, entries)
+}
+
+type urlEntries struct {
+ IssuingCertificates []string `json:"issuing_certificates" structs:"issuing_certificates" mapstructure:"issuing_certificates"`
+ CRLDistributionPoints []string `json:"crl_distribution_points" structs:"crl_distribution_points" mapstructure:"crl_distribution_points"`
+ OCSPServers []string `json:"ocsp_servers" structs:"ocsp_servers" mapstructure:"ocsp_servers"`
+}
+
+const pathConfigURLsHelpSyn = `
+Set the URLs for the issuing CA, CRL distribution points, and OCSP servers.
+`
+
+const pathConfigURLsHelpDesc = `
+This path allows you to set the issuing CA, CRL distribution points, and
+OCSP server URLs that will be encoded into issued certificates. If these
+values are not set, no such information will be encoded in the issued
+certificates. To delete URLs, simply re-set the appropriate value with an
+empty string.
+
+Multiple URLs can be specified for each type; use commas to separate them.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go
new file mode 100644
index 0000000..ed60e75
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go
@@ -0,0 +1,265 @@
+package pki
+
+import (
+ "encoding/pem"
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// Returns the CA in raw format
+func pathFetchCA(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `ca(/pem)?`,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathFetchRead,
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+// Returns the CA chain
+func pathFetchCAChain(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `(cert/)?ca_chain`,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathFetchRead,
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+// Returns the CRL in raw format
+func pathFetchCRL(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `crl(/pem)?`,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathFetchRead,
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+// Returns any valid (non-revoked) cert. Since "ca" fits the pattern, this path
+// also handles returning the CA cert in a non-raw format.
+func pathFetchValid(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `cert/(?P<serial>[0-9A-Fa-f-:]+)`,
+ Fields: map[string]*framework.FieldSchema{
+ "serial": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Certificate serial number, in colon- or
+hyphen-separated hexadecimal`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathFetchRead,
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+// This returns the CRL in a non-raw format
+func pathFetchCRLViaCertPath(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `cert/crl`,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathFetchRead,
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+// This returns the list of serial numbers for certs
+func pathFetchListCerts(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "certs/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathFetchCertList,
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+func (b *backend) pathFetchCertList(req *logical.Request, data *framework.FieldData) (response *logical.Response, retErr error) {
+ entries, err := req.Storage.List("certs/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(entries), nil
+}
+
+func (b *backend) pathFetchRead(req *logical.Request, data *framework.FieldData) (response *logical.Response, retErr error) {
+ var serial, pemType, contentType string
+ var certEntry, revokedEntry *logical.StorageEntry
+ var funcErr error
+ var certificate []byte
+ var revocationTime int64
+ response = &logical.Response{
+ Data: map[string]interface{}{},
+ }
+
+ // Some of these need to return raw and some non-raw;
+ // this is basically handled by setting contentType or not.
+ // Errors don't cause an immediate exit, because the raw
+ // paths still need to return raw output.
+
+ switch {
+ case req.Path == "ca" || req.Path == "ca/pem":
+ serial = "ca"
+ contentType = "application/pkix-cert"
+ if req.Path == "ca/pem" {
+ pemType = "CERTIFICATE"
+ }
+ case req.Path == "ca_chain" || req.Path == "cert/ca_chain":
+ serial = "ca_chain"
+ if req.Path == "ca_chain" {
+ contentType = "application/pkix-cert"
+ }
+ case req.Path == "crl" || req.Path == "crl/pem":
+ serial = "crl"
+ contentType = "application/pkix-crl"
+ if req.Path == "crl/pem" {
+ pemType = "X509 CRL"
+ }
+ case req.Path == "cert/crl":
+ serial = "crl"
+ pemType = "X509 CRL"
+ default:
+ serial = data.Get("serial").(string)
+ pemType = "CERTIFICATE"
+ }
+ if len(serial) == 0 {
+ response = logical.ErrorResponse("The serial number must be provided")
+ goto reply
+ }
+
+ if serial == "ca_chain" {
+ caInfo, err := fetchCAInfo(req)
+ switch err.(type) {
+ case errutil.UserError:
+ response = logical.ErrorResponse(err.Error())
+ goto reply
+ case errutil.InternalError:
+ retErr = err
+ goto reply
+ }
+
+ caChain := caInfo.GetCAChain()
+ for _, ca := range caChain {
+ block := pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: ca.Bytes,
+ }
+ certificate = append(certificate, pem.EncodeToMemory(&block)...)
+ }
+ goto reply
+ }
+
+ certEntry, funcErr = fetchCertBySerial(req, req.Path, serial)
+ if funcErr != nil {
+ switch funcErr.(type) {
+ case errutil.UserError:
+ response = logical.ErrorResponse(funcErr.Error())
+ goto reply
+ case errutil.InternalError:
+ retErr = funcErr
+ goto reply
+ }
+ }
+ if certEntry == nil {
+ response = logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found", serial))
+ goto reply
+ }
+
+ certificate = certEntry.Value
+
+ if len(pemType) != 0 {
+ block := pem.Block{
+ Type: pemType,
+ Bytes: certEntry.Value,
+ }
+ certificate = pem.EncodeToMemory(&block)
+ }
+
+ revokedEntry, funcErr = fetchCertBySerial(req, "revoked/", serial)
+ if funcErr != nil {
+ switch funcErr.(type) {
+ case errutil.UserError:
+ response = logical.ErrorResponse(funcErr.Error())
+ goto reply
+ case errutil.InternalError:
+ retErr = funcErr
+ goto reply
+ }
+ }
+ if revokedEntry != nil {
+ var revInfo revocationInfo
+ err := revokedEntry.DecodeJSON(&revInfo)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Error decoding revocation entry for serial %s: %s", serial, err)), nil
+ }
+ revocationTime = revInfo.RevocationTime
+ }
+
+reply:
+ switch {
+ case len(contentType) != 0:
+ response = &logical.Response{
+ Data: map[string]interface{}{
+ logical.HTTPContentType: contentType,
+ logical.HTTPRawBody: certificate,
+ }}
+ if retErr != nil {
+ if b.Logger().IsWarn() {
+ b.Logger().Warn("Possible error, but cannot return in raw response. Note that an empty CA probably means none was configured, and an empty CRL is possibly correct", "error", retErr)
+ }
+ }
+ retErr = nil
+ if len(certificate) > 0 {
+ response.Data[logical.HTTPStatusCode] = 200
+ } else {
+ response.Data[logical.HTTPStatusCode] = 204
+ }
+ case retErr != nil:
+ response = nil
+ default:
+ response.Data["certificate"] = string(certificate)
+ response.Data["revocation_time"] = revocationTime
+ }
+
+ return
+}
+
+const pathFetchHelpSyn = `
+Fetch a CA, CRL, CA Chain, or non-revoked certificate.
+`
+
+const pathFetchHelpDesc = `
+This allows certificates to be fetched. If using the cert/ prefix, any non-revoked certificate can be fetched.
+
+Using "ca" or "crl" as the value fetches the appropriate information in DER encoding. Add "/pem" to either to get PEM encoding.
+
+Using "ca_chain" as the value fetches the certificate authority trust chain in PEM encoding.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go
new file mode 100644
index 0000000..71a0455
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go
@@ -0,0 +1,237 @@
+package pki
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathGenerateIntermediate(b *backend) *framework.Path {
+ ret := &framework.Path{
+ Pattern: "intermediate/generate/" + framework.GenericNameRegex("exported"),
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathGenerateIntermediate,
+ },
+
+ HelpSynopsis: pathGenerateIntermediateHelpSyn,
+ HelpDescription: pathGenerateIntermediateHelpDesc,
+ }
+
+ ret.Fields = addCACommonFields(map[string]*framework.FieldSchema{})
+ ret.Fields = addCAKeyGenerationFields(ret.Fields)
+
+ return ret
+}
+
+func pathSetSignedIntermediate(b *backend) *framework.Path {
+ ret := &framework.Path{
+ Pattern: "intermediate/set-signed",
+
+ Fields: map[string]*framework.FieldSchema{
+ "certificate": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `PEM-format certificate. This must be a CA
+certificate with a public key matching the
+previously-generated key from the generation
+endpoint.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathSetSignedIntermediate,
+ },
+
+ HelpSynopsis: pathSetSignedIntermediateHelpSyn,
+ HelpDescription: pathSetSignedIntermediateHelpDesc,
+ }
+
+ return ret
+}
+
+func (b *backend) pathGenerateIntermediate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var err error
+
+ exported, format, role, errorResp := b.getGenerationParams(data)
+ if errorResp != nil {
+ return errorResp, nil
+ }
+
+ var resp *logical.Response
+ parsedBundle, err := generateIntermediateCSR(b, role, nil, req, data)
+ if err != nil {
+ switch err.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(err.Error()), nil
+ case errutil.InternalError:
+ return nil, err
+ }
+ }
+
+ csrb, err := parsedBundle.ToCSRBundle()
+ if err != nil {
+ return nil, fmt.Errorf("Error converting raw CSR bundle to CSR bundle: %s", err)
+ }
+
+ resp = &logical.Response{
+ Data: map[string]interface{}{},
+ }
+
+ switch format {
+ case "pem":
+ resp.Data["csr"] = csrb.CSR
+ if exported {
+ resp.Data["private_key"] = csrb.PrivateKey
+ resp.Data["private_key_type"] = csrb.PrivateKeyType
+ }
+
+ case "pem_bundle":
+ resp.Data["csr"] = csrb.CSR
+ if exported {
+ resp.Data["csr"] = fmt.Sprintf("%s\n%s", csrb.PrivateKey, csrb.CSR)
+ resp.Data["private_key"] = csrb.PrivateKey
+ resp.Data["private_key_type"] = csrb.PrivateKeyType
+ }
+
+ case "der":
+ resp.Data["csr"] = base64.StdEncoding.EncodeToString(parsedBundle.CSRBytes)
+ if exported {
+ resp.Data["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes)
+ resp.Data["private_key_type"] = csrb.PrivateKeyType
+ }
+ }
+
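+ // Store only the private key for now; the certificate is written to
+ // storage later, once the signed cert is supplied via set-signed.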
+ cb := &certutil.CertBundle{}
+ cb.PrivateKey = csrb.PrivateKey
+ cb.PrivateKeyType = csrb.PrivateKeyType
+
+ entry, err := logical.StorageEntryJSON("config/ca_bundle", cb)
+ if err != nil {
+ return nil, err
+ }
+ err = req.Storage.Put(entry)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+func (b *backend) pathSetSignedIntermediate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ cert := data.Get("certificate").(string)
+
+ if cert == "" {
+ return logical.ErrorResponse("no certificate provided in the \"certficate\" parameter"), nil
+ }
+
+ inputBundle, err := certutil.ParsePEMBundle(cert)
+ if err != nil {
+ switch err.(type) {
+ case errutil.InternalError:
+ return nil, err
+ default:
+ return logical.ErrorResponse(err.Error()), nil
+ }
+ }
+
+ if inputBundle.Certificate == nil {
+ return logical.ErrorResponse("supplied certificate could not be successfully parsed"), nil
+ }
+
+ cb := &certutil.CertBundle{}
+ entry, err := req.Storage.Get("config/ca_bundle")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return logical.ErrorResponse("could not find any existing entry with a private key"), nil
+ }
+
+ err = entry.DecodeJSON(cb)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(cb.PrivateKey) == 0 || cb.PrivateKeyType == "" {
+ return logical.ErrorResponse("could not find an existing private key"), nil
+ }
+
+ parsedCB, err := cb.ToParsedCertBundle()
+ if err != nil {
+ return nil, err
+ }
+ if parsedCB.PrivateKey == nil {
+ return nil, fmt.Errorf("saved key could not be parsed successfully")
+ }
+
+ inputBundle.PrivateKey = parsedCB.PrivateKey
+ inputBundle.PrivateKeyType = parsedCB.PrivateKeyType
+ inputBundle.PrivateKeyBytes = parsedCB.PrivateKeyBytes
+
+ if !inputBundle.Certificate.IsCA {
+ return logical.ErrorResponse("the given certificate is not marked for CA use and cannot be used with this backend"), nil
+ }
+
+ if err := inputBundle.Verify(); err != nil {
+ return nil, fmt.Errorf("verification of parsed bundle failed: %s", err)
+ }
+
+ cb, err = inputBundle.ToCertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("error converting raw values into cert bundle: %s", err)
+ }
+
+ entry, err = logical.StorageEntryJSON("config/ca_bundle", cb)
+ if err != nil {
+ return nil, err
+ }
+ err = req.Storage.Put(entry)
+ if err != nil {
+ return nil, err
+ }
+
+ entry.Key = "certs/" + normalizeSerial(cb.SerialNumber)
+ entry.Value = inputBundle.CertificateBytes
+ err = req.Storage.Put(entry)
+ if err != nil {
+ return nil, err
+ }
+
+ // For ease of later use, also store just the certificate at a known
+ // location
+ entry.Key = "ca"
+ entry.Value = inputBundle.CertificateBytes
+ err = req.Storage.Put(entry)
+ if err != nil {
+ return nil, err
+ }
+
+ // Build a fresh CRL
+ err = buildCRL(b, req)
+
+ return nil, err
+}
+
+const pathGenerateIntermediateHelpSyn = `
+Generate a new CSR and private key used for signing.
+`
+
+const pathGenerateIntermediateHelpDesc = `
+See the API documentation for more information.
+`
+
+const pathSetSignedIntermediateHelpSyn = `
+Provide the signed intermediate CA cert.
+`
+
+const pathSetSignedIntermediateHelpDesc = `
+See the API documentation for more information.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go
new file mode 100644
index 0000000..26f7421
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go
@@ -0,0 +1,311 @@
+package pki
+
+import (
+ "encoding/base64"
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathIssue(b *backend) *framework.Path {
+ ret := &framework.Path{
+ Pattern: "issue/" + framework.GenericNameRegex("role"),
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathIssue,
+ },
+
+ HelpSynopsis: pathIssueHelpSyn,
+ HelpDescription: pathIssueHelpDesc,
+ }
+
+ ret.Fields = addNonCACommonFields(map[string]*framework.FieldSchema{})
+ return ret
+}
+
+func pathSign(b *backend) *framework.Path {
+ ret := &framework.Path{
+ Pattern: "sign/" + framework.GenericNameRegex("role"),
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathSign,
+ },
+
+ HelpSynopsis: pathSignHelpSyn,
+ HelpDescription: pathSignHelpDesc,
+ }
+
+ ret.Fields = addNonCACommonFields(map[string]*framework.FieldSchema{})
+
+ ret.Fields["csr"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: `PEM-format CSR to be signed.`,
+ }
+
+ return ret
+}
+
+func pathSignVerbatim(b *backend) *framework.Path {
+ ret := &framework.Path{
+ Pattern: "sign-verbatim" + framework.OptionalParamRegex("role"),
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathSignVerbatim,
+ },
+
+ HelpSynopsis: pathSignHelpSyn,
+ HelpDescription: pathSignHelpDesc,
+ }
+
+ ret.Fields = addNonCACommonFields(map[string]*framework.FieldSchema{})
+
+ ret.Fields["csr"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: `PEM-format CSR to be signed. Values will be
+taken verbatim from the CSR, except for
+basic constraints.`,
+ }
+
+ return ret
+}
+
+// pathIssue issues a certificate and private key from given parameters,
+// subject to role restrictions
+func (b *backend) pathIssue(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role").(string)
+
+ // Get the role
+ role, err := b.getRole(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", roleName)), nil
+ }
+
+ return b.pathIssueSignCert(req, data, role, false, false)
+}
+
+// pathSign issues a certificate from a submitted CSR, subject to role
+// restrictions
+func (b *backend) pathSign(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role").(string)
+
+ // Get the role
+ role, err := b.getRole(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", roleName)), nil
+ }
+
+ return b.pathIssueSignCert(req, data, role, true, false)
+}
+
+// pathSignVerbatim issues a certificate from a submitted CSR, *not* subject to
+// role restrictions
+func (b *backend) pathSignVerbatim(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+
+ roleName := data.Get("role").(string)
+
+ // Get the role if one was specified
+ role, err := b.getRole(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+
+ ttl := b.System().DefaultLeaseTTL()
+ maxTTL := b.System().MaxLeaseTTL()
+
+ entry := &roleEntry{
+ TTL: ttl.String(),
+ MaxTTL: maxTTL.String(),
+ AllowLocalhost: true,
+ AllowAnyName: true,
+ AllowIPSANs: true,
+ EnforceHostnames: false,
+ KeyType: "any",
+ UseCSRCommonName: true,
+ UseCSRSANs: true,
+ GenerateLease: new(bool),
+ }
+
+ if role != nil {
+ if role.TTL != "" {
+ entry.TTL = role.TTL
+ }
+ if role.MaxTTL != "" {
+ entry.MaxTTL = role.MaxTTL
+ }
+ entry.NoStore = role.NoStore
+ }
+
+ *entry.GenerateLease = false
+ if role != nil && role.GenerateLease != nil {
+ *entry.GenerateLease = *role.GenerateLease
+ }
+
+ return b.pathIssueSignCert(req, data, entry, true, true)
+}
+
+func (b *backend) pathIssueSignCert(
+ req *logical.Request, data *framework.FieldData, role *roleEntry, useCSR, useCSRValues bool) (*logical.Response, error) {
+ format := getFormat(data)
+ if format == "" {
+ return logical.ErrorResponse(
+ `The "format" path parameter must be "pem", "der", or "pem_bundle"`), nil
+ }
+
+ var caErr error
+ signingBundle, caErr := fetchCAInfo(req)
+ switch caErr.(type) {
+ case errutil.UserError:
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "Could not fetch the CA certificate (was one set?): %s", caErr)}
+ case errutil.InternalError:
+ return nil, errutil.InternalError{Err: fmt.Sprintf(
+ "Error fetching CA certificate: %s", caErr)}
+ }
+
+ var parsedBundle *certutil.ParsedCertBundle
+ var err error
+ if useCSR {
+ parsedBundle, err = signCert(b, role, signingBundle, false, useCSRValues, req, data)
+ } else {
+ parsedBundle, err = generateCert(b, role, signingBundle, false, req, data)
+ }
+ if err != nil {
+ switch err.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(err.Error()), nil
+ case errutil.InternalError:
+ return nil, err
+ }
+ }
+
+ signingCB, err := signingBundle.ToCertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("Error converting raw signing bundle to cert bundle: %s", err)
+ }
+
+ cb, err := parsedBundle.ToCertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("Error converting raw cert bundle to cert bundle: %s", err)
+ }
+
+ respData := map[string]interface{}{
+ "serial_number": cb.SerialNumber,
+ }
+
+ switch format {
+ case "pem":
+ respData["issuing_ca"] = signingCB.Certificate
+ respData["certificate"] = cb.Certificate
+ if cb.CAChain != nil && len(cb.CAChain) > 0 {
+ respData["ca_chain"] = cb.CAChain
+ }
+ if !useCSR {
+ respData["private_key"] = cb.PrivateKey
+ respData["private_key_type"] = cb.PrivateKeyType
+ }
+
+ case "pem_bundle":
+ respData["issuing_ca"] = signingCB.Certificate
+ respData["certificate"] = cb.ToPEMBundle()
+ if cb.CAChain != nil && len(cb.CAChain) > 0 {
+ respData["ca_chain"] = cb.CAChain
+ }
+ if !useCSR {
+ respData["private_key"] = cb.PrivateKey
+ respData["private_key_type"] = cb.PrivateKeyType
+ }
+
+ case "der":
+ respData["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes)
+ respData["issuing_ca"] = base64.StdEncoding.EncodeToString(signingBundle.CertificateBytes)
+
+ var caChain []string
+ for _, caCert := range parsedBundle.CAChain {
+ caChain = append(caChain, base64.StdEncoding.EncodeToString(caCert.Bytes))
+ }
+ if caChain != nil && len(caChain) > 0 {
+ respData["ca_chain"] = caChain
+ }
+
+ if !useCSR {
+ respData["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes)
+ respData["private_key_type"] = cb.PrivateKeyType
+ }
+ }
+
+ var resp *logical.Response
+ switch {
+ case role.GenerateLease == nil:
+ return nil, fmt.Errorf("generate lease in role is nil")
+ case *role.GenerateLease == false:
+ // If lease generation is disabled do not populate `Secret` field in
+ // the response
+ resp = &logical.Response{
+ Data: respData,
+ }
+ default:
+ resp = b.Secret(SecretCertsType).Response(
+ respData,
+ map[string]interface{}{
+ "serial_number": cb.SerialNumber,
+ })
+ resp.Secret.TTL = parsedBundle.Certificate.NotAfter.Sub(time.Now())
+ }
+
+ if !role.NoStore {
+ err = req.Storage.Put(&logical.StorageEntry{
+ Key: "certs/" + normalizeSerial(cb.SerialNumber),
+ Value: parsedBundle.CertificateBytes,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("unable to store certificate locally: %v", err)
+ }
+ }
+
+ return resp, nil
+}
+
+const pathIssueHelpSyn = `
+Request a certificate using a certain role with the provided details.
+`
+
+const pathIssueHelpDesc = `
+This path allows requesting a certificate to be issued according to the
+policy of the given role. The certificate will only be issued if the
+requested details are allowed by the role policy.
+
+This path returns a certificate and a private key. If you want a workflow
+that does not expose a private key, generate a CSR locally and use the
+sign path instead.
+`
+
+const pathSignHelpSyn = `
+Request certificates using a certain role with the provided details.
+`
+
+const pathSignHelpDesc = `
+This path allows requesting certificates to be issued according to the
+policy of the given role. The certificate will only be issued if the
+requested common name is allowed by the role policy.
+
+This path requires a CSR; if you want Vault to generate a private key
+for you, use the issue path instead.
+`
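+
+// Illustrative sketch, not part of the backend API: the three endpoints in
+// this file differ in where key material and name constraints come from.
+// Assuming a role named "example" exists, a request routed to this backend
+// would look roughly like:
+//
+//   // issue/<role>: Vault generates the key pair; role policy applies.
+//   req := &logical.Request{
+//       Operation: logical.UpdateOperation,
+//       Path:      "issue/example",
+//       Data:      map[string]interface{}{"common_name": "host.example.com"},
+//   }
+//
+//   // sign/<role>: the caller supplies a PEM CSR in "csr"; role policy still applies.
+//   // sign-verbatim: CSR values are honored and role restrictions are bypassed.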
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_revoke.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_revoke.go
new file mode 100644
index 0000000..2911995
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_revoke.go
@@ -0,0 +1,94 @@
+package pki
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathRevoke(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `revoke`,
+ Fields: map[string]*framework.FieldSchema{
+ "serial_number": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Certificate serial number, in colon- or
+hyphen-separated hexadecimal`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRevokeWrite,
+ },
+
+ HelpSynopsis: pathRevokeHelpSyn,
+ HelpDescription: pathRevokeHelpDesc,
+ }
+}
+
+func pathRotateCRL(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `crl/rotate`,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRotateCRLRead,
+ },
+
+ HelpSynopsis: pathRotateCRLHelpSyn,
+ HelpDescription: pathRotateCRLHelpDesc,
+ }
+}
+
+func (b *backend) pathRevokeWrite(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ serial := data.Get("serial_number").(string)
+ if len(serial) == 0 {
+ return logical.ErrorResponse("The serial number must be provided"), nil
+ }
+
+ // We store and identify by lowercase colon-separated hex, but other
+ // utilities use dashes and/or uppercase, so normalize
+ serial = strings.Replace(strings.ToLower(serial), "-", ":", -1)
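+ // e.g. "17-67-16-B0" and "17:67:16:B0" both normalize to "17:67:16:b0"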
+
+ b.revokeStorageLock.Lock()
+ defer b.revokeStorageLock.Unlock()
+
+ return revokeCert(b, req, serial, false)
+}
+
+func (b *backend) pathRotateCRLRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.revokeStorageLock.RLock()
+ defer b.revokeStorageLock.RUnlock()
+
+ crlErr := buildCRL(b, req)
+ switch crlErr.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(fmt.Sprintf("Error during CRL building: %s", crlErr)), nil
+ case errutil.InternalError:
+ return nil, fmt.Errorf("Error encountered during CRL building: %s", crlErr)
+ default:
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "success": true,
+ },
+ }, nil
+ }
+}
+
+const pathRevokeHelpSyn = `
+Revoke a certificate by serial number.
+`
+
+const pathRevokeHelpDesc = `
+This allows a certificate to be revoked by its serial number. A root token is required.
+`
+
+const pathRotateCRLHelpSyn = `
+Force a rebuild of the CRL.
+`
+
+const pathRotateCRLHelpDesc = `
+Force a rebuild of the CRL. This can be used to remove expired certificates from it if no certificates have been revoked. A root token is required.
+`
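+
+// Illustrative usage, assuming a mount at "pki": a read of crl/rotate, e.g.
+// `vault read pki/crl/rotate`, invokes pathRotateCRLRead above and returns
+// {"success": true} once the CRL has been rebuilt.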
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go
new file mode 100644
index 0000000..4d9e115
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go
@@ -0,0 +1,543 @@
+package pki
+
+import (
+ "crypto/x509"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathListRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList,
+ },
+
+ HelpSynopsis: pathListRolesHelpSyn,
+ HelpDescription: pathListRolesHelpDesc,
+ }
+}
+
+func pathRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role",
+ },
+
+ "ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: `The lease duration if no specific lease duration is
+requested. The lease duration controls the expiration
+of certificates issued by this backend. Defaults to
+the system default, capped at max_ttl.`,
+ },
+
+ "max_ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: "The maximum allowed lease duration",
+ },
+
+ "allow_localhost": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `Whether to allow "localhost" as a valid common
+name in a request`,
+ },
+
+ "allowed_domains": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: `If set, clients can request certificates for
+subdomains directly beneath these domains, including
+wildcard subdomains. See the documentation for more
+information. This parameter accepts a comma-separated list
+of domains.`,
+ },
+
+ "allow_bare_domains": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: `If set, clients can request certificates
+for the base domains themselves, e.g. "example.com".
+This is a separate option as in some cases this can
+be considered a security threat.`,
+ },
+
+ "allow_subdomains": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: `If set, clients can request certificates for
+subdomains of the CNs allowed by the other role options,
+including wildcard subdomains. See the documentation for
+more information.`,
+ },
+
+ "allow_glob_domains": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: `If set, domains specified in "allowed_domains"
+can include glob patterns, e.g. "ftp*.example.com". See
+the documentation for more information.`,
+ },
+
+ "allow_any_name": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: `If set, clients can request certificates for
+any CN they like. See the documentation for more
+information.`,
+ },
+
+ "enforce_hostnames": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `If set, only valid host names are allowed for
+CN and SANs. Defaults to true.`,
+ },
+
+ "allow_ip_sans": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `If set, IP Subject Alternative Names are allowed.
+Any valid IP is accepted.`,
+ },
+
+ "server_flag": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `If set, certificates are flagged for server auth use.
+Defaults to true.`,
+ },
+
+ "client_flag": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `If set, certificates are flagged for client auth use.
+Defaults to true.`,
+ },
+
+ "code_signing_flag": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: `If set, certificates are flagged for code signing
+use. Defaults to false.`,
+ },
+
+ "email_protection_flag": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: `If set, certificates are flagged for email
+protection use. Defaults to false.`,
+ },
+
+ "key_type": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "rsa",
+ Description: `The type of key to use; defaults to RSA. "rsa"
+and "ec" are the only valid values.`,
+ },
+
+ "key_bits": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Default: 2048,
+ Description: `The number of bits to use. You will almost
+certainly want to change this if you adjust
+the key_type.`,
+ },
+
+ "key_usage": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "DigitalSignature,KeyAgreement,KeyEncipherment",
+ Description: `A comma-separated set of key usages (not extended
+key usages). Valid values can be found at
+https://golang.org/pkg/crypto/x509/#KeyUsage
+-- simply drop the "KeyUsage" part of the name.
+To remove all key usages from being set, set
+this value to an empty string.`,
+ },
+
+ "use_csr_common_name": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `If set, when used with a signing profile,
+the common name in the CSR will be used. This
+does *not* include any requested Subject Alternative
+Names. Defaults to true.`,
+ },
+
+ "use_csr_sans": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `If set, when used with a signing profile,
+the SANs in the CSR will be used. This does *not*
+include the Common Name (cn). Defaults to true.`,
+ },
+
+ "ou": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: `If set, the OU (OrganizationalUnit) will be set to
+this value in certificates issued by this role.`,
+ },
+
+ "organization": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: `If set, the O (Organization) will be set to
+this value in certificates issued by this role.`,
+ },
+
+ "generate_lease": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: `
+If set, certificates issued/signed against this role will have Vault leases
+attached to them. Defaults to "false". Certificates can be added to the CRL by
+"vault revoke " when certificates are associated with leases. It can
+also be done using the "pki/revoke" endpoint. However, when lease generation is
+disabled, invoking "pki/revoke" would be the only way to add the certificates
+to the CRL. When large number of certificates are generated with long
+lifetimes, it is recommended that lease generation be disabled, as large amount of
+leases adversely affect the startup time of Vault.`,
+ },
+ "no_store": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: `
+If set, certificates issued/signed against this role will not be stored in
+the storage backend. This can improve performance when issuing large numbers
+of certificates. However, certificates issued in this way cannot be enumerated
+or revoked, so this option is recommended only for certificates that are
+non-sensitive, or extremely short-lived. This option implies a value of "false"
+for "generate_lease".`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRoleRead,
+ logical.UpdateOperation: b.pathRoleCreate,
+ logical.DeleteOperation: b.pathRoleDelete,
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func (b *backend) getRole(s logical.Storage, n string) (*roleEntry, error) {
+ entry, err := s.Get("role/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result roleEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ // Migrate existing saved entries and save back if changed
+ modified := false
+ if len(result.TTL) == 0 && len(result.Lease) != 0 {
+ result.TTL = result.Lease
+ result.Lease = ""
+ modified = true
+ }
+ if len(result.MaxTTL) == 0 && len(result.LeaseMax) != 0 {
+ result.MaxTTL = result.LeaseMax
+ result.LeaseMax = ""
+ modified = true
+ }
+ if result.AllowBaseDomain {
+ result.AllowBaseDomain = false
+ result.AllowBareDomains = true
+ modified = true
+ }
+ if result.AllowedBaseDomain != "" {
+ found := false
+ allowedDomains := strings.Split(result.AllowedDomains, ",")
+ if len(allowedDomains) != 0 {
+ for _, v := range allowedDomains {
+ if v == result.AllowedBaseDomain {
+ found = true
+ break
+ }
+ }
+ }
+ if !found {
+ if result.AllowedDomains == "" {
+ result.AllowedDomains = result.AllowedBaseDomain
+ } else {
+ result.AllowedDomains += "," + result.AllowedBaseDomain
+ }
+ }
+ result.AllowedBaseDomain = ""
+ modified = true
+ }
+
+ // Upgrade generate_lease in role
+ if result.GenerateLease == nil {
+ // All the new roles will have GenerateLease always set to a value. A
+ // nil value indicates that this role needs an upgrade. Set it to
+ // `true` to not alter its current behavior.
+ result.GenerateLease = new(bool)
+ *result.GenerateLease = true
+ modified = true
+ }
+
+ if modified {
+ jsonEntry, err := logical.StorageEntryJSON("role/"+n, &result)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.Put(jsonEntry); err != nil {
+ return nil, err
+ }
+ }
+
+ return &result, nil
+}
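+
+// Migration sketch (illustrative values): a pre-upgrade role entry such as
+//
+//   {"lease": "1h", "lease_max": "24h", "allowed_base_domain": "example.com"}
+//
+// is rewritten by getRole on first read to
+//
+//   {"ttl": "1h", "max_ttl": "24h", "allowed_domains": "example.com", "generate_lease": true}
+//
+// and persisted back, so subsequent reads see only the current field names.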
+
+func (b *backend) pathRoleDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("role/" + data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathRoleRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role name"), nil
+ }
+
+ role, err := b.getRole(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ hasMax := true
+ if len(role.MaxTTL) == 0 {
+ role.MaxTTL = "(system default)"
+ hasMax = false
+ }
+ if len(role.TTL) == 0 {
+ if hasMax {
+ role.TTL = "(system default, capped to role max)"
+ } else {
+ role.TTL = "(system default)"
+ }
+ }
+
+ resp := &logical.Response{
+ Data: structs.New(role).Map(),
+ }
+
+ if resp.Data == nil {
+ return nil, fmt.Errorf("error converting role data to response")
+ }
+
+ // These values are deprecated and the entries are migrated on read
+ delete(resp.Data, "lease")
+ delete(resp.Data, "lease_max")
+ delete(resp.Data, "allowed_base_domain")
+
+ return resp, nil
+}
+
+func (b *backend) pathRoleList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entries, err := req.Storage.List("role/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(entries), nil
+}
+
+func (b *backend) pathRoleCreate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var err error
+ name := data.Get("name").(string)
+
+ entry := &roleEntry{
+ MaxTTL: data.Get("max_ttl").(string),
+ TTL: data.Get("ttl").(string),
+ AllowLocalhost: data.Get("allow_localhost").(bool),
+ AllowedDomains: data.Get("allowed_domains").(string),
+ AllowBareDomains: data.Get("allow_bare_domains").(bool),
+ AllowSubdomains: data.Get("allow_subdomains").(bool),
+ AllowGlobDomains: data.Get("allow_glob_domains").(bool),
+ AllowAnyName: data.Get("allow_any_name").(bool),
+ EnforceHostnames: data.Get("enforce_hostnames").(bool),
+ AllowIPSANs: data.Get("allow_ip_sans").(bool),
+ ServerFlag: data.Get("server_flag").(bool),
+ ClientFlag: data.Get("client_flag").(bool),
+ CodeSigningFlag: data.Get("code_signing_flag").(bool),
+ EmailProtectionFlag: data.Get("email_protection_flag").(bool),
+ KeyType: data.Get("key_type").(string),
+ KeyBits: data.Get("key_bits").(int),
+ UseCSRCommonName: data.Get("use_csr_common_name").(bool),
+ UseCSRSANs: data.Get("use_csr_sans").(bool),
+ KeyUsage: data.Get("key_usage").(string),
+ OU: data.Get("ou").(string),
+ Organization: data.Get("organization").(string),
+ GenerateLease: new(bool),
+ NoStore: data.Get("no_store").(bool),
+ }
+
+ // no_store implies generate_lease := false
+ if entry.NoStore {
+ *entry.GenerateLease = false
+ } else {
+ *entry.GenerateLease = data.Get("generate_lease").(bool)
+ }
+
+ if entry.KeyType == "rsa" && entry.KeyBits < 2048 {
+ return logical.ErrorResponse("RSA keys < 2048 bits are unsafe and not supported"), nil
+ }
+
+ var maxTTL time.Duration
+ maxSystemTTL := b.System().MaxLeaseTTL()
+ if len(entry.MaxTTL) == 0 {
+ maxTTL = maxSystemTTL
+ } else {
+ maxTTL, err = parseutil.ParseDurationSecond(entry.MaxTTL)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Invalid max ttl: %s", err)), nil
+ }
+ }
+ if maxTTL > maxSystemTTL {
+ return logical.ErrorResponse("Requested max TTL is higher than backend maximum"), nil
+ }
+
+ ttl := b.System().DefaultLeaseTTL()
+ if len(entry.TTL) != 0 {
+ ttl, err = parseutil.ParseDurationSecond(entry.TTL)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Invalid ttl: %s", err)), nil
+ }
+ }
+ if ttl > maxTTL {
+ // If they are using the system default, cap it to the role max;
+ // if it was explicitly provided in the request, make it an error
+ if len(entry.TTL) == 0 {
+ ttl = maxTTL
+ } else {
+ return logical.ErrorResponse(
+ `"ttl" value must be less than "max_ttl" and/or backend default max lease TTL value`,
+ ), nil
+ }
+ }
+
+ // Persist clamped TTLs
+ entry.TTL = ttl.String()
+ entry.MaxTTL = maxTTL.String()
+
+ if errResp := validateKeyTypeLength(entry.KeyType, entry.KeyBits); errResp != nil {
+ return errResp, nil
+ }
+
+ // Store it
+ jsonEntry, err := logical.StorageEntryJSON("role/"+name, entry)
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(jsonEntry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func parseKeyUsages(input string) int {
+ var parsedKeyUsages x509.KeyUsage
+ splitKeyUsage := strings.Split(input, ",")
+ for _, k := range splitKeyUsage {
+ switch strings.ToLower(strings.TrimSpace(k)) {
+ case "digitalsignature":
+ parsedKeyUsages |= x509.KeyUsageDigitalSignature
+ case "contentcommitment":
+ parsedKeyUsages |= x509.KeyUsageContentCommitment
+ case "keyencipherment":
+ parsedKeyUsages |= x509.KeyUsageKeyEncipherment
+ case "dataencipherment":
+ parsedKeyUsages |= x509.KeyUsageDataEncipherment
+ case "keyagreement":
+ parsedKeyUsages |= x509.KeyUsageKeyAgreement
+ case "certsign":
+ parsedKeyUsages |= x509.KeyUsageCertSign
+ case "crlsign":
+ parsedKeyUsages |= x509.KeyUsageCRLSign
+ case "encipheronly":
+ parsedKeyUsages |= x509.KeyUsageEncipherOnly
+ case "decipheronly":
+ parsedKeyUsages |= x509.KeyUsageDecipherOnly
+ }
+ }
+
+ return int(parsedKeyUsages)
+}
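+
+// For example, the role default of
+// "DigitalSignature,KeyAgreement,KeyEncipherment" parses to
+// int(x509.KeyUsageDigitalSignature | x509.KeyUsageKeyAgreement | x509.KeyUsageKeyEncipherment);
+// names that do not match any case above are silently ignored.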
+
+type roleEntry struct {
+ LeaseMax string `json:"lease_max" structs:"lease_max" mapstructure:"lease_max"`
+ Lease string `json:"lease" structs:"lease" mapstructure:"lease"`
+ MaxTTL string `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
+ TTL string `json:"ttl" structs:"ttl" mapstructure:"ttl"`
+ AllowLocalhost bool `json:"allow_localhost" structs:"allow_localhost" mapstructure:"allow_localhost"`
+ AllowedBaseDomain string `json:"allowed_base_domain" structs:"allowed_base_domain" mapstructure:"allowed_base_domain"`
+ AllowedDomains string `json:"allowed_domains" structs:"allowed_domains" mapstructure:"allowed_domains"`
+ AllowBaseDomain bool `json:"allow_base_domain" structs:"allow_base_domain" mapstructure:"allow_base_domain"`
+ AllowBareDomains bool `json:"allow_bare_domains" structs:"allow_bare_domains" mapstructure:"allow_bare_domains"`
+ AllowTokenDisplayName bool `json:"allow_token_displayname" structs:"allow_token_displayname" mapstructure:"allow_token_displayname"`
+ AllowSubdomains bool `json:"allow_subdomains" structs:"allow_subdomains" mapstructure:"allow_subdomains"`
+ AllowGlobDomains bool `json:"allow_glob_domains" structs:"allow_glob_domains" mapstructure:"allow_glob_domains"`
+ AllowAnyName bool `json:"allow_any_name" structs:"allow_any_name" mapstructure:"allow_any_name"`
+ EnforceHostnames bool `json:"enforce_hostnames" structs:"enforce_hostnames" mapstructure:"enforce_hostnames"`
+ AllowIPSANs bool `json:"allow_ip_sans" structs:"allow_ip_sans" mapstructure:"allow_ip_sans"`
+ ServerFlag bool `json:"server_flag" structs:"server_flag" mapstructure:"server_flag"`
+ ClientFlag bool `json:"client_flag" structs:"client_flag" mapstructure:"client_flag"`
+ CodeSigningFlag bool `json:"code_signing_flag" structs:"code_signing_flag" mapstructure:"code_signing_flag"`
+ EmailProtectionFlag bool `json:"email_protection_flag" structs:"email_protection_flag" mapstructure:"email_protection_flag"`
+ UseCSRCommonName bool `json:"use_csr_common_name" structs:"use_csr_common_name" mapstructure:"use_csr_common_name"`
+ UseCSRSANs bool `json:"use_csr_sans" structs:"use_csr_sans" mapstructure:"use_csr_sans"`
+ KeyType string `json:"key_type" structs:"key_type" mapstructure:"key_type"`
+ KeyBits int `json:"key_bits" structs:"key_bits" mapstructure:"key_bits"`
+ MaxPathLength *int `json:",omitempty" structs:"max_path_length,omitempty" mapstructure:"max_path_length"`
+ KeyUsage string `json:"key_usage" structs:"key_usage" mapstructure:"key_usage"`
+ OU string `json:"ou" structs:"ou" mapstructure:"ou"`
+ Organization string `json:"organization" structs:"organization" mapstructure:"organization"`
+ GenerateLease *bool `json:"generate_lease,omitempty" structs:"generate_lease,omitempty"`
+ NoStore bool `json:"no_store" structs:"no_store" mapstructure:"no_store"`
+}
+
+const pathListRolesHelpSyn = `List the existing roles in this backend`
+
+const pathListRolesHelpDesc = `Roles will be listed by the role name.`
+
+const pathRoleHelpSyn = `Manage the roles that can be created with this backend.`
+
+const pathRoleHelpDesc = `This path lets you manage the roles that can be created with this backend.`
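+
+// Illustrative usage, assuming this backend is mounted at "pki": a role is
+// created or updated via an update to roles/<name>, e.g.
+//
+//   vault write pki/roles/example \
+//       allowed_domains="example.com" allow_subdomains=true max_ttl="72h"
+//
+// after which issue/example can issue certificates for hosts beneath
+// example.com, subject to the options defined above.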
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go
new file mode 100644
index 0000000..82772b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go
@@ -0,0 +1,314 @@
+package pki
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/mitchellh/mapstructure"
+)
+
+func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+
+ var err error
+ b := Backend()
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = b.Initialize()
+ if err != nil {
+ t.Fatal(err)
+ }
+ return b, config.StorageView
+}
+
+func TestPki_RoleGenerateLease(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ roleData := map[string]interface{}{
+ "allowed_domains": "myvault.com",
+ "ttl": "5h",
+ }
+
+ roleReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/testrole",
+ Storage: storage,
+ Data: roleData,
+ }
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ // generate_lease cannot be nil. It either has to be set during role
+ // creation or has to be filled in by the upgrade code
+ generateLease := resp.Data["generate_lease"].(*bool)
+ if generateLease == nil {
+ t.Fatalf("generate_lease should not be nil")
+ }
+
+ // By default, generate_lease should be `false`
+ if *generateLease {
+ t.Fatalf("generate_lease should not be set by default")
+ }
+
+ // role.GenerateLease will be nil after the decode
+ var role roleEntry
+ err = mapstructure.Decode(resp.Data, &role)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Make it explicit
+ role.GenerateLease = nil
+
+ entry, err := logical.StorageEntryJSON("role/testrole", role)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := storage.Put(entry); err != nil {
+ t.Fatal(err)
+ }
+
+ // Reading should upgrade generate_lease
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ generateLease = resp.Data["generate_lease"].(*bool)
+ if generateLease == nil {
+ t.Fatalf("generate_lease should not be nil")
+ }
+
+ // Upgrade should set generate_lease to `true`
+ if !*generateLease {
+ t.Fatalf("generate_lease should be set after an upgrade")
+ }
+
+ // Make sure that setting generate_lease to `true` works properly
+ roleReq.Operation = logical.UpdateOperation
+ roleReq.Path = "roles/testrole2"
+ roleReq.Data["generate_lease"] = true
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ generateLease = resp.Data["generate_lease"].(*bool)
+ if generateLease == nil {
+ t.Fatalf("generate_lease should not be nil")
+ }
+ if !*generateLease {
+ t.Fatalf("generate_lease should have been set")
+ }
+}
+
+func TestPki_RoleNoStore(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ roleData := map[string]interface{}{
+ "allowed_domains": "myvault.com",
+ "ttl": "5h",
+ }
+
+ roleReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/testrole",
+ Storage: storage,
+ Data: roleData,
+ }
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ // By default, no_store should be `false`
+ noStore := resp.Data["no_store"].(bool)
+ if noStore {
+ t.Fatalf("no_store should not be set by default")
+ }
+
+ // Make sure that setting no_store to `true` works properly
+ roleReq.Operation = logical.UpdateOperation
+ roleReq.Path = "roles/testrole_nostore"
+ roleReq.Data["no_store"] = true
+ roleReq.Data["allowed_domain"] = "myvault.com"
+ roleReq.Data["allow_subdomains"] = true
+ roleReq.Data["ttl"] = "5h"
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ noStore = resp.Data["no_store"].(bool)
+ if !noStore {
+ t.Fatalf("no_store should have been set to true")
+ }
+
+ // issue a certificate and test that it's not stored
+ caData := map[string]interface{}{
+ "common_name": "myvault.com",
+ "ttl": "5h",
+ "ip_sans": "127.0.0.1",
+ }
+ caReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "root/generate/internal",
+ Storage: storage,
+ Data: caData,
+ }
+ resp, err = b.HandleRequest(caReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ issueData := map[string]interface{}{
+ "common_name": "cert.myvault.com",
+ "format": "pem",
+ "ip_sans": "127.0.0.1",
+ "ttl": "1h",
+ }
+ issueReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "issue/testrole_nostore",
+ Storage: storage,
+ Data: issueData,
+ }
+
+ resp, err = b.HandleRequest(issueReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ // list certs
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ListOperation,
+ Path: "certs",
+ Storage: storage,
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+ if len(resp.Data["keys"].([]string)) != 1 {
+ t.Fatalf("Only the CA certificate should be stored: %#v", resp)
+ }
+}
+
+func TestPki_CertsLease(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, storage := createBackendWithStorage(t)
+
+ caData := map[string]interface{}{
+ "common_name": "myvault.com",
+ "ttl": "5h",
+ "ip_sans": "127.0.0.1",
+ }
+
+ caReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "root/generate/internal",
+ Storage: storage,
+ Data: caData,
+ }
+
+ resp, err = b.HandleRequest(caReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ roleData := map[string]interface{}{
+ "allowed_domains": "myvault.com",
+ "allow_subdomains": true,
+ "ttl": "2h",
+ }
+
+ roleReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/testrole",
+ Storage: storage,
+ Data: roleData,
+ }
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ issueData := map[string]interface{}{
+ "common_name": "cert.myvault.com",
+ "format": "pem",
+ "ip_sans": "127.0.0.1",
+ }
+ issueReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "issue/testrole",
+ Storage: storage,
+ Data: issueData,
+ }
+
+ resp, err = b.HandleRequest(issueReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ if resp.Secret != nil {
+ t.Fatalf("expected a response that does not contain a secret")
+ }
+
+ // Turn on the lease generation and issue a certificate. The response
+ // should have a `Secret` object populated.
+ roleData["generate_lease"] = true
+
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ resp, err = b.HandleRequest(issueReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v resp: %#v", err, resp)
+ }
+
+ if resp.Secret == nil {
+ t.Fatalf("expected a response that contains a secret")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go
new file mode 100644
index 0000000..d029531
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go
@@ -0,0 +1,308 @@
+package pki
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathGenerateRoot(b *backend) *framework.Path {
+ ret := &framework.Path{
+ Pattern: "root/generate/" + framework.GenericNameRegex("exported"),
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathCAGenerateRoot,
+ },
+
+ HelpSynopsis: pathGenerateRootHelpSyn,
+ HelpDescription: pathGenerateRootHelpDesc,
+ }
+
+ ret.Fields = addCACommonFields(map[string]*framework.FieldSchema{})
+ ret.Fields = addCAKeyGenerationFields(ret.Fields)
+ ret.Fields = addCAIssueFields(ret.Fields)
+
+ return ret
+}
+
+func pathSignIntermediate(b *backend) *framework.Path {
+ ret := &framework.Path{
+ Pattern: "root/sign-intermediate",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathCASignIntermediate,
+ },
+
+ HelpSynopsis: pathSignIntermediateHelpSyn,
+ HelpDescription: pathSignIntermediateHelpDesc,
+ }
+
+ ret.Fields = addCACommonFields(map[string]*framework.FieldSchema{})
+ ret.Fields = addCAIssueFields(ret.Fields)
+
+ ret.Fields["csr"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: `PEM-format CSR to be signed.`,
+ }
+
+ ret.Fields["use_csr_values"] = &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: `If true, then:
+1) Subject information, including names and alternate
+names, will be preserved from the CSR rather than
+using values provided in the other parameters to
+this path;
+2) Any key usages requested in the CSR will be
+added to the basic set of key usages used for CA
+certs signed by this path; for instance,
+the non-repudiation flag.`,
+ }
+
+ return ret
+}
+
+func (b *backend) pathCAGenerateRoot(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var err error
+
+ exported, format, role, errorResp := b.getGenerationParams(data)
+ if errorResp != nil {
+ return errorResp, nil
+ }
+
+ maxPathLengthIface, ok := data.GetOk("max_path_length")
+ if ok {
+ maxPathLength := maxPathLengthIface.(int)
+ role.MaxPathLength = &maxPathLength
+ }
+
+ parsedBundle, err := generateCert(b, role, nil, true, req, data)
+ if err != nil {
+ switch err.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(err.Error()), nil
+ case errutil.InternalError:
+ return nil, err
+ }
+ }
+
+ cb, err := parsedBundle.ToCertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("error converting raw cert bundle to cert bundle: %s", err)
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "expiration": int64(parsedBundle.Certificate.NotAfter.Unix()),
+ "serial_number": cb.SerialNumber,
+ },
+ }
+
+ switch format {
+ case "pem":
+ resp.Data["certificate"] = cb.Certificate
+ resp.Data["issuing_ca"] = cb.Certificate
+ if exported {
+ resp.Data["private_key"] = cb.PrivateKey
+ resp.Data["private_key_type"] = cb.PrivateKeyType
+ }
+
+ case "pem_bundle":
+ resp.Data["issuing_ca"] = cb.Certificate
+
+ if exported {
+ resp.Data["private_key"] = cb.PrivateKey
+ resp.Data["private_key_type"] = cb.PrivateKeyType
+ resp.Data["certificate"] = fmt.Sprintf("%s\n%s", cb.PrivateKey, cb.Certificate)
+ } else {
+ resp.Data["certificate"] = cb.Certificate
+ }
+
+ case "der":
+ resp.Data["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes)
+ resp.Data["issuing_ca"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes)
+ if exported {
+ resp.Data["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes)
+ resp.Data["private_key_type"] = cb.PrivateKeyType
+ }
+ }
+
+ // Store it as the CA bundle
+ entry, err := logical.StorageEntryJSON("config/ca_bundle", cb)
+ if err != nil {
+ return nil, err
+ }
+ err = req.Storage.Put(entry)
+ if err != nil {
+ return nil, err
+ }
+
+ // Also store it as just the certificate identified by serial number, so it
+ // can be revoked
+ err = req.Storage.Put(&logical.StorageEntry{
+ Key: "certs/" + normalizeSerial(cb.SerialNumber),
+ Value: parsedBundle.CertificateBytes,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("Unable to store certificate locally: %v", err)
+ }
+
+ // For ease of later use, also store just the certificate at a known
+ // location
+ entry.Key = "ca"
+ entry.Value = parsedBundle.CertificateBytes
+ err = req.Storage.Put(entry)
+ if err != nil {
+ return nil, err
+ }
+
+ // Build a fresh CRL
+ err = buildCRL(b, req)
+ if err != nil {
+ return nil, err
+ }
+
+ if parsedBundle.Certificate.MaxPathLen == 0 {
+ resp.AddWarning("Max path length of the generated certificate is zero. This certificate cannot be used to issue intermediate CA certificates.")
+ }
+
+ return resp, nil
+}
+
+func (b *backend) pathCASignIntermediate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var err error
+
+ format := getFormat(data)
+ if format == "" {
+ return logical.ErrorResponse(
+ `The "format" path parameter must be "pem" or "der"`,
+ ), nil
+ }
+
+ role := &roleEntry{
+ TTL: data.Get("ttl").(string),
+ AllowLocalhost: true,
+ AllowAnyName: true,
+ AllowIPSANs: true,
+ EnforceHostnames: false,
+ KeyType: "any",
+ }
+
+ if cn := data.Get("common_name").(string); len(cn) == 0 {
+ role.UseCSRCommonName = true
+ }
+
+ var caErr error
+ signingBundle, caErr := fetchCAInfo(req)
+ switch caErr.(type) {
+ case errutil.UserError:
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "could not fetch the CA certificate (was one set?): %s", caErr)}
+ case errutil.InternalError:
+ return nil, errutil.InternalError{Err: fmt.Sprintf(
+ "error fetching CA certificate: %s", caErr)}
+ }
+
+ useCSRValues := data.Get("use_csr_values").(bool)
+
+ maxPathLengthIface, ok := data.GetOk("max_path_length")
+ if ok {
+ maxPathLength := maxPathLengthIface.(int)
+ role.MaxPathLength = &maxPathLength
+ }
+
+ parsedBundle, err := signCert(b, role, signingBundle, true, useCSRValues, req, data)
+ if err != nil {
+ switch err.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(err.Error()), nil
+ case errutil.InternalError:
+ return nil, err
+ }
+ }
+
+ if err := parsedBundle.Verify(); err != nil {
+ return nil, fmt.Errorf("verification of parsed bundle failed: %s", err)
+ }
+
+ signingCB, err := signingBundle.ToCertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("Error converting raw signing bundle to cert bundle: %s", err)
+ }
+
+ cb, err := parsedBundle.ToCertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("Error converting raw cert bundle to cert bundle: %s", err)
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "expiration": int64(parsedBundle.Certificate.NotAfter.Unix()),
+ "serial_number": cb.SerialNumber,
+ },
+ }
+
+ switch format {
+ case "pem":
+ resp.Data["certificate"] = cb.Certificate
+ resp.Data["issuing_ca"] = signingCB.Certificate
+ if cb.CAChain != nil && len(cb.CAChain) > 0 {
+ resp.Data["ca_chain"] = cb.CAChain
+ }
+
+ case "pem_bundle":
+ resp.Data["certificate"] = cb.ToPEMBundle()
+ resp.Data["issuing_ca"] = signingCB.Certificate
+ if cb.CAChain != nil && len(cb.CAChain) > 0 {
+ resp.Data["ca_chain"] = cb.CAChain
+ }
+
+ case "der":
+ resp.Data["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes)
+ resp.Data["issuing_ca"] = base64.StdEncoding.EncodeToString(signingBundle.CertificateBytes)
+
+ var caChain []string
+ for _, caCert := range parsedBundle.CAChain {
+ caChain = append(caChain, base64.StdEncoding.EncodeToString(caCert.Bytes))
+ }
+ if caChain != nil && len(caChain) > 0 {
+ resp.Data["ca_chain"] = cb.CAChain
+ }
+ }
+
+ err = req.Storage.Put(&logical.StorageEntry{
+ Key: "certs/" + normalizeSerial(cb.SerialNumber),
+ Value: parsedBundle.CertificateBytes,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("Unable to store certificate locally: %v", err)
+ }
+
+ if parsedBundle.Certificate.MaxPathLen == 0 {
+ resp.AddWarning("Max path length of the signed certificate is zero. This certificate cannot be used to issue intermediate CA certificates.")
+ }
+
+ return resp, nil
+}
+
+const pathGenerateRootHelpSyn = `
+Generate a new CA certificate and private key used for signing.
+`
+
+const pathGenerateRootHelpDesc = `
+See the API documentation for more information.
+`
+
+const pathSignIntermediateHelpSyn = `
+Issue an intermediate CA certificate based on the provided CSR.
+`
+
+const pathSignIntermediateHelpDesc = `
+See the API documentation for more information.
+`
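+
+// Illustrative usage, assuming a mount at "pki": a root CA is generated with
+//
+//   vault write pki/root/generate/internal common_name="example.com" ttl=8760h
+//
+// where the trailing path segment is the "exported" field ("internal" keeps
+// the private key inside Vault, "exported" returns it). An externally
+// generated intermediate CSR is then signed with
+//
+//   vault write pki/root/sign-intermediate csr=@intermediate.csr ttl=4380h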
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_tidy.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_tidy.go
new file mode 100644
index 0000000..386ff0f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_tidy.go
@@ -0,0 +1,170 @@
+package pki
+
+import (
+ "crypto/x509"
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathTidy(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "tidy",
+ Fields: map[string]*framework.FieldSchema{
+ "tidy_cert_store": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `Set to true to enable tidying up
+the certificate store`,
+ Default: false,
+ },
+
+ "tidy_revocation_list": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `Set to true to enable tidying up
+the revocation list`,
+ Default: false,
+ },
+
+ "safety_buffer": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: `The amount of extra time that must have passed
+beyond certificate expiration before it is removed
+from the backend storage and/or revocation list.
+Defaults to 72 hours.`,
+ Default: 259200, // 72h; TypeDurationSecond currently requires defaults to be int
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathTidyWrite,
+ },
+
+ HelpSynopsis: pathTidyHelpSyn,
+ HelpDescription: pathTidyHelpDesc,
+ }
+}
+
+func (b *backend) pathTidyWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ safetyBuffer := d.Get("safety_buffer").(int)
+ tidyCertStore := d.Get("tidy_cert_store").(bool)
+ tidyRevocationList := d.Get("tidy_revocation_list").(bool)
+
+ bufferDuration := time.Duration(safetyBuffer) * time.Second
+
+ if tidyCertStore {
+ serials, err := req.Storage.List("certs/")
+ if err != nil {
+ return nil, fmt.Errorf("error fetching list of certs: %s", err)
+ }
+
+ for _, serial := range serials {
+ certEntry, err := req.Storage.Get("certs/" + serial)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching certificate %s: %s", serial, err)
+ }
+
+ if certEntry == nil {
+ return nil, fmt.Errorf("certificate entry for serial %s is nil", serial)
+ }
+
+ if certEntry.Value == nil || len(certEntry.Value) == 0 {
+ return nil, fmt.Errorf("found entry for serial %s but actual certificate is empty", serial)
+ }
+
+ cert, err := x509.ParseCertificate(certEntry.Value)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse stored certificate with serial %s: %s", serial, err)
+ }
+
+ if time.Now().After(cert.NotAfter.Add(bufferDuration)) {
+ if err := req.Storage.Delete("certs/" + serial); err != nil {
+ return nil, fmt.Errorf("error deleting serial %s from storage: %s", serial, err)
+ }
+ }
+ }
+ }
+
+ if tidyRevocationList {
+ b.revokeStorageLock.Lock()
+ defer b.revokeStorageLock.Unlock()
+
+ tidiedRevoked := false
+
+ revokedSerials, err := req.Storage.List("revoked/")
+ if err != nil {
+ return nil, fmt.Errorf("error fetching list of revoked certs: %s", err)
+ }
+
+ var revInfo revocationInfo
+ for _, serial := range revokedSerials {
+ revokedEntry, err := req.Storage.Get("revoked/" + serial)
+ if err != nil {
+ return nil, fmt.Errorf("unable to fetch revoked cert with serial %s: %s", serial, err)
+ }
+ if revokedEntry == nil {
+ return nil, fmt.Errorf("revoked certificate entry for serial %s is nil", serial)
+ }
+ if revokedEntry.Value == nil || len(revokedEntry.Value) == 0 {
+ // TODO: In this case, remove it and continue? How likely is this to
+ // happen? Alternately, could skip it entirely, or could implement a
+ // delete function so that there is a way to remove these
+ return nil, fmt.Errorf("found revoked serial but actual certificate is empty")
+ }
+
+ err = revokedEntry.DecodeJSON(&revInfo)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding revocation entry for serial %s: %s", serial, err)
+ }
+
+ revokedCert, err := x509.ParseCertificate(revInfo.CertificateBytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse stored revoked certificate with serial %s: %s", serial, err)
+ }
+
+ if time.Now().After(revokedCert.NotAfter.Add(bufferDuration)) {
+ if err := req.Storage.Delete("revoked/" + serial); err != nil {
+ return nil, fmt.Errorf("error deleting serial %s from revoked list: %s", serial, err)
+ }
+ tidiedRevoked = true
+ }
+ }
+
+ if tidiedRevoked {
+ if err := buildCRL(b, req); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return nil, nil
+}
+
+const pathTidyHelpSyn = `
+Tidy up the backend by removing expired certificates, revocation information,
+or both.
+`
+
+const pathTidyHelpDesc = `
+This endpoint allows expired certificates and/or revocation information to be
+removed from the backend, freeing up storage and shortening CRLs.
+
+For safety, this function is a no-op if called without parameters; cleanup from
+normal certificate storage must be enabled with 'tidy_cert_store' and cleanup
+from revocation information must be enabled with 'tidy_revocation_list'.
+
+The 'safety_buffer' parameter is useful to ensure that clock skew amongst your
+hosts cannot lead to a certificate being removed from the CRL while it is still
+considered valid by other hosts (for instance, if their clocks are a few
+minutes behind). The 'safety_buffer' parameter can be an integer number of
+seconds or a string duration like "72h".
+
+All certificates and/or revocation information currently stored in the backend
+will be checked when this endpoint is hit. The expiration of each certificate
+held in certificate storage or in revocation information will then be checked.
+If the current time, minus the value of 'safety_buffer', is greater than the
+expiration, the entry will be removed.
+`
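+
+// Illustrative usage, assuming a mount at "pki":
+//
+//   vault write pki/tidy tidy_cert_store=true tidy_revocation_list=true \
+//       safety_buffer=72h
+//
+// removes stored and revoked certificates that expired more than 72 hours ago
+// and rebuilds the CRL if any revoked entries were removed.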
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/secret_certs.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/secret_certs.go
new file mode 100644
index 0000000..32f6f42
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/secret_certs.go
@@ -0,0 +1,52 @@
+package pki
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// SecretCertsType is the name used to identify this type
+const SecretCertsType = "pki"
+
+func secretCerts(b *backend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretCertsType,
+ Fields: map[string]*framework.FieldSchema{
+ "certificate": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The PEM-encoded concatenated certificate and
+issuing certificate authority`,
+ },
+ "private_key": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The PEM-encoded private key for the certificate",
+ },
+ "serial": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The serial number of the certificate, for handy
+reference`,
+ },
+ },
+
+ Revoke: b.secretCredsRevoke,
+ }
+}
+
+func (b *backend) secretCredsRevoke(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ if req.Secret == nil {
+ return nil, fmt.Errorf("secret is nil in request")
+ }
+
+ serialInt, ok := req.Secret.InternalData["serial_number"]
+ if !ok {
+ return nil, fmt.Errorf("could not find serial in internal secret data")
+ }
+
+ b.revokeStorageLock.Lock()
+ defer b.revokeStorageLock.Unlock()
+
+ return revokeCert(b, req, serialInt.(string), true)
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/util.go
new file mode 100644
index 0000000..3dffb53
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/util.go
@@ -0,0 +1,7 @@
+package pki
+
+import "strings"
+
+func normalizeSerial(serial string) string {
+ return strings.Replace(strings.ToLower(serial), ":", "-", -1)
+}
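+
+// For example, normalizeSerial("17:67:16:B0") returns "17-67-16-b0", the form
+// used in the "certs/<serial>" storage keys throughout this package.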
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go
new file mode 100644
index 0000000..6f4befd
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go
@@ -0,0 +1,161 @@
+package postgresql
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+ "sync"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend(conf).Setup(conf)
+}
+
+func Backend(conf *logical.BackendConfig) *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(backendHelp),
+
+ Paths: []*framework.Path{
+ pathConfigConnection(&b),
+ pathConfigLease(&b),
+ pathListRoles(&b),
+ pathRoles(&b),
+ pathRoleCreate(&b),
+ },
+
+ Secrets: []*framework.Secret{
+ secretCreds(&b),
+ },
+
+ Clean: b.ResetDB,
+
+ Invalidate: b.invalidate,
+ }
+
+ b.logger = conf.Logger
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+
+ db *sql.DB
+ lock sync.Mutex
+
+ logger log.Logger
+}
+
+// DB returns the database connection.
+func (b *backend) DB(s logical.Storage) (*sql.DB, error) {
+ b.logger.Trace("postgres/db: enter")
+ defer b.logger.Trace("postgres/db: exit")
+
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ // If we already have a DB handle, reuse it if the connection is still alive
+ if b.db != nil {
+ if err := b.db.Ping(); err == nil {
+ return b.db, nil
+ }
+ // If the ping was unsuccessful, close it and ignore errors as we'll be
+ // reestablishing anyways
+ b.db.Close()
+ }
+
+ // Otherwise, attempt to make connection
+ entry, err := s.Get("config/connection")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil,
+ fmt.Errorf("configure the DB connection with config/connection first")
+ }
+
+ var connConfig connectionConfig
+ if err := entry.DecodeJSON(&connConfig); err != nil {
+ return nil, err
+ }
+
+ conn := connConfig.ConnectionURL
+ if len(conn) == 0 {
+ conn = connConfig.ConnectionString
+ }
+
+ // Ensure the timezone is set to UTC for all connections
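+ // For example (illustrative values), "postgres://vault@localhost/db?sslmode=disable"
+ // becomes "postgres://vault@localhost/db?sslmode=disable&timezone=utc", while a
+ // key/value DSN such as "host=localhost dbname=db" becomes
+ // "host=localhost dbname=db timezone=utc".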
+ if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") {
+ if strings.Contains(conn, "?") {
+ conn += "&timezone=utc"
+ } else {
+ conn += "?timezone=utc"
+ }
+ } else {
+ conn += " timezone=utc"
+ }
+
+ b.db, err = sql.Open("postgres", conn)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set some connection pool settings. We don't need much of this,
+ // since the request rate shouldn't be high.
+ b.db.SetMaxOpenConns(connConfig.MaxOpenConnections)
+ b.db.SetMaxIdleConns(connConfig.MaxIdleConnections)
+
+ return b.db, nil
+}
+
+// ResetDB forces a connection next time DB() is called.
+func (b *backend) ResetDB() {
+ b.logger.Trace("postgres/resetdb: enter")
+ defer b.logger.Trace("postgres/resetdb: exit")
+
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.db != nil {
+ b.db.Close()
+ }
+
+ b.db = nil
+}
+
+func (b *backend) invalidate(key string) {
+ switch key {
+ case "config/connection":
+ b.ResetDB()
+ }
+}
+
+// Lease returns the lease information
+func (b *backend) Lease(s logical.Storage) (*configLease, error) {
+ entry, err := s.Get("config/lease")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result configLease
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+const backendHelp = `
+The PostgreSQL backend dynamically generates database users.
+
+After mounting this backend, configure it using the endpoints within
+the "config/" path.
+`
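+
+// Illustrative usage, assuming a mount at "postgresql": the connection is
+// configured before any roles or credentials are requested, e.g.
+//
+//   vault write postgresql/config/connection \
+//       connection_url="postgres://vault:secret@localhost:5432/postgres?sslmode=disable"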
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend_test.go
new file mode 100644
index 0000000..5559a80
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend_test.go
@@ -0,0 +1,620 @@
+package postgresql
+
+import (
+ "database/sql"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "path"
+ "reflect"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/lib/pq"
+ "github.com/mitchellh/mapstructure"
+ dockertest "gopkg.in/ory-am/dockertest.v2"
+)
+
+var (
+ testImagePull sync.Once
+)
+
+func prepareTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cid dockertest.ContainerID, retURL string) {
+ if os.Getenv("PG_URL") != "" {
+ return "", os.Getenv("PG_URL")
+ }
+
+ // Without this the checks for whether the container has started seem to
+ // never actually pass. There's really no reason to expose the test
+ // containers, so don't.
+ dockertest.BindDockerToLocalhost = "yep"
+
+ testImagePull.Do(func() {
+ dockertest.Pull("postgres")
+ })
+
+ cid, connErr := dockertest.ConnectToPostgreSQL(60, 500*time.Millisecond, func(connURL string) bool {
+ // This will cause a validation to run
+ resp, err := b.HandleRequest(&logical.Request{
+ Storage: s,
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Data: map[string]interface{}{
+ "connection_url": connURL,
+ },
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ // It's likely not up and running yet, so return false and try again
+ return false
+ }
+ if resp == nil {
+ t.Fatal("expected warning")
+ }
+
+ retURL = connURL
+ return true
+ })
+
+ if connErr != nil {
+ t.Fatalf("could not connect to database: %v", connErr)
+ }
+
+ return
+}
+
+func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
+ err := cid.KillRemove()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackend_config_connection(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ configData := map[string]interface{}{
+ "connection_url": "sample_connection_url",
+ "value": "",
+ "max_open_connections": 9,
+ "max_idle_connections": 7,
+ "verify_connection": false,
+ }
+
+ configReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Storage: config.StorageView,
+ Data: configData,
+ }
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ configReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ delete(configData, "verify_connection")
+ if !reflect.DeepEqual(configData, resp.Data) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data)
+ }
+}
+
+func TestBackend_basic(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, connData, false),
+ testAccStepCreateRole(t, "web", testRole, false),
+ testAccStepReadCreds(t, b, config.StorageView, "web", connURL),
+ },
+ })
+}
+
+func TestBackend_roleCrud(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, connData, false),
+ testAccStepCreateRole(t, "web", testRole, false),
+ testAccStepReadRole(t, "web", testRole),
+ testAccStepDeleteRole(t, "web"),
+ testAccStepReadRole(t, "web", ""),
+ },
+ })
+}
+
+func TestBackend_BlockStatements(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ jsonBlockStatement, err := json.Marshal(testBlockStatementRoleSlice)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, connData, false),
+ // This will also validate the query
+ testAccStepCreateRole(t, "web-block", testBlockStatementRole, true),
+ testAccStepCreateRole(t, "web-block", string(jsonBlockStatement), false),
+ },
+ })
+}
+
+func TestBackend_roleReadOnly(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, connData, false),
+ testAccStepCreateRole(t, "web", testRole, false),
+ testAccStepCreateRole(t, "web-readonly", testReadOnlyRole, false),
+ testAccStepReadRole(t, "web-readonly", testReadOnlyRole),
+ testAccStepCreateTable(t, b, config.StorageView, "web", connURL),
+ testAccStepReadCreds(t, b, config.StorageView, "web-readonly", connURL),
+ testAccStepDropTable(t, b, config.StorageView, "web", connURL),
+ testAccStepDeleteRole(t, "web-readonly"),
+ testAccStepDeleteRole(t, "web"),
+ testAccStepReadRole(t, "web-readonly", ""),
+ },
+ })
+}
+
+func TestBackend_roleReadOnly_revocationSQL(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cid, connURL := prepareTestContainer(t, config.StorageView, b)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ connData := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t, connData, false),
+ testAccStepCreateRoleWithRevocationSQL(t, "web", testRole, defaultRevocationSQL, false),
+ testAccStepCreateRoleWithRevocationSQL(t, "web-readonly", testReadOnlyRole, defaultRevocationSQL, false),
+ testAccStepReadRole(t, "web-readonly", testReadOnlyRole),
+ testAccStepCreateTable(t, b, config.StorageView, "web", connURL),
+ testAccStepReadCreds(t, b, config.StorageView, "web-readonly", connURL),
+ testAccStepDropTable(t, b, config.StorageView, "web", connURL),
+ testAccStepDeleteRole(t, "web-readonly"),
+ testAccStepDeleteRole(t, "web"),
+ testAccStepReadRole(t, "web-readonly", ""),
+ },
+ })
+}
+
+func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Data: d,
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if expectError {
+ if resp.Data == nil {
+ return fmt.Errorf("data is nil")
+ }
+ var e struct {
+ Error string `mapstructure:"error"`
+ }
+ if err := mapstructure.Decode(resp.Data, &e); err != nil {
+ return err
+ }
+ if len(e.Error) == 0 {
+				return fmt.Errorf("expected error, but write succeeded")
+ }
+ return nil
+ } else if resp != nil && resp.IsError() {
+ return fmt.Errorf("got an error response: %v", resp.Error())
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepCreateRole(t *testing.T, name string, sql string, expectFail bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: path.Join("roles", name),
+ Data: map[string]interface{}{
+ "sql": sql,
+ },
+ ErrorOk: expectFail,
+ }
+}
+
+func testAccStepCreateRoleWithRevocationSQL(t *testing.T, name, sql, revocationSQL string, expectFail bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: path.Join("roles", name),
+ Data: map[string]interface{}{
+ "sql": sql,
+ "revocation_sql": revocationSQL,
+ },
+ ErrorOk: expectFail,
+ }
+}
+
+func testAccStepDeleteRole(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: path.Join("roles", name),
+ }
+}
+
+func testAccStepReadCreds(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: path.Join("creds", name),
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Username string `mapstructure:"username"`
+ Password string `mapstructure:"password"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ log.Printf("[TRACE] Generated credentials: %v", d)
+			conn, err := pq.ParseURL(connURL)
+			if err != nil {
+ t.Fatal(err)
+ }
+
+ conn += " timezone=utc"
+
+ db, err := sql.Open("postgres", conn)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ returnedRows := func() int {
+ stmt, err := db.Prepare("SELECT DISTINCT schemaname FROM pg_tables WHERE has_table_privilege($1, 'information_schema.role_column_grants', 'select');")
+ if err != nil {
+ return -1
+ }
+ defer stmt.Close()
+
+ rows, err := stmt.Query(d.Username)
+ if err != nil {
+ return -1
+ }
+ defer rows.Close()
+
+ i := 0
+ for rows.Next() {
+ i++
+ }
+ return i
+ }
+
+ // minNumPermissions is the minimum number of permissions that will always be present.
+ const minNumPermissions = 2
+
+ userRows := returnedRows()
+ if userRows < minNumPermissions {
+ t.Fatalf("did not get expected number of rows, got %d", userRows)
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.RevokeOperation,
+ Storage: s,
+ Secret: &logical.Secret{
+ InternalData: map[string]interface{}{
+ "secret_type": "creds",
+ "username": d.Username,
+ "role": name,
+ },
+ },
+ })
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.IsError() {
+ return fmt.Errorf("Error on resp: %#v", *resp)
+ }
+ }
+
+ userRows = returnedRows()
+ // User shouldn't exist so returnedRows() should encounter an error and exit with -1
+ if userRows != -1 {
+ t.Fatalf("did not get expected number of rows, got %d", userRows)
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepCreateTable(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: path.Join("creds", name),
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Username string `mapstructure:"username"`
+ Password string `mapstructure:"password"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ log.Printf("[TRACE] Generated credentials: %v", d)
+			conn, err := pq.ParseURL(connURL)
+			if err != nil {
+ t.Fatal(err)
+ }
+
+ conn += " timezone=utc"
+
+ db, err := sql.Open("postgres", conn)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("CREATE TABLE test (id SERIAL PRIMARY KEY);")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.RevokeOperation,
+ Storage: s,
+ Secret: &logical.Secret{
+ InternalData: map[string]interface{}{
+ "secret_type": "creds",
+ "username": d.Username,
+ },
+ },
+ })
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.IsError() {
+ return fmt.Errorf("Error on resp: %#v", *resp)
+ }
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepDropTable(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: path.Join("creds", name),
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Username string `mapstructure:"username"`
+ Password string `mapstructure:"password"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ log.Printf("[TRACE] Generated credentials: %v", d)
+			conn, err := pq.ParseURL(connURL)
+			if err != nil {
+ t.Fatal(err)
+ }
+
+ conn += " timezone=utc"
+
+ db, err := sql.Open("postgres", conn)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("DROP TABLE test;")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.RevokeOperation,
+ Storage: s,
+ Secret: &logical.Secret{
+ InternalData: map[string]interface{}{
+ "secret_type": "creds",
+ "username": d.Username,
+ },
+ },
+ })
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.IsError() {
+ return fmt.Errorf("Error on resp: %#v", *resp)
+ }
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepReadRole(t *testing.T, name string, sql string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "roles/" + name,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if sql == "" {
+ return nil
+ }
+
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var d struct {
+ SQL string `mapstructure:"sql"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.SQL != sql {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ return nil
+ },
+ }
+}
+
+const testRole = `
+CREATE ROLE "{{name}}" WITH
+ LOGIN
+ PASSWORD '{{password}}'
+ VALID UNTIL '{{expiration}}';
+GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
+`
+
+const testReadOnlyRole = `
+CREATE ROLE "{{name}}" WITH
+ LOGIN
+ PASSWORD '{{password}}'
+ VALID UNTIL '{{expiration}}';
+GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";
+GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}";
+`
+
+const testBlockStatementRole = `
+DO $$
+BEGIN
+ IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
+ CREATE ROLE "foo-role";
+ CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
+ ALTER ROLE "foo-role" SET search_path = foo;
+ GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
+ GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
+ END IF;
+END
+$$
+
+CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';
+GRANT "foo-role" TO "{{name}}";
+ALTER ROLE "{{name}}" SET search_path = foo;
+GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";
+`
+
+var testBlockStatementRoleSlice = []string{
+ `
+DO $$
+BEGIN
+ IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
+ CREATE ROLE "foo-role";
+ CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
+ ALTER ROLE "foo-role" SET search_path = foo;
+ GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
+ GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
+ END IF;
+END
+$$
+`,
+ `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`,
+ `GRANT "foo-role" TO "{{name}}";`,
+ `ALTER ROLE "{{name}}" SET search_path = foo;`,
+ `GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`,
+}
+
+const defaultRevocationSQL = `
+REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}};
+REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}};
+REVOKE USAGE ON SCHEMA public FROM {{name}};
+
+DROP ROLE IF EXISTS {{name}};
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_config_connection.go
new file mode 100644
index 0000000..577c296
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_config_connection.go
@@ -0,0 +1,165 @@
+package postgresql
+
+import (
+ "database/sql"
+ "fmt"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ _ "github.com/lib/pq"
+)
+
+func pathConfigConnection(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/connection",
+ Fields: map[string]*framework.FieldSchema{
+ "connection_url": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "DB connection string",
+ },
+
+ "value": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `DB connection string. Use 'connection_url' instead.
+This will be deprecated.`,
+ },
+
+ "verify_connection": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `If set, connection_url is verified by actually connecting to the database`,
+ },
+
+ "max_open_connections": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `Maximum number of open connections to the database;
+a zero uses the default value of two and a
+negative value means unlimited`,
+ },
+
+ "max_idle_connections": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `Maximum number of idle connections to the database;
+a zero uses the value of max_open_connections
+and a negative value disables idle connections.
+If larger than max_open_connections it will be
+reduced to the same size.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathConnectionWrite,
+ logical.ReadOperation: b.pathConnectionRead,
+ },
+
+ HelpSynopsis: pathConfigConnectionHelpSyn,
+ HelpDescription: pathConfigConnectionHelpDesc,
+ }
+}
+
+// pathConnectionRead reads out the connection configuration
+func (b *backend) pathConnectionRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ entry, err := req.Storage.Get("config/connection")
+ if err != nil {
+		return nil, fmt.Errorf("failed to read connection configuration: %v", err)
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var config connectionConfig
+ if err := entry.DecodeJSON(&config); err != nil {
+ return nil, err
+ }
+ return &logical.Response{
+ Data: structs.New(config).Map(),
+ }, nil
+}
+
+func (b *backend) pathConnectionWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ connValue := data.Get("value").(string)
+ connURL := data.Get("connection_url").(string)
+ if connURL == "" {
+ if connValue == "" {
+ return logical.ErrorResponse("connection_url parameter must be supplied"), nil
+ } else {
+ connURL = connValue
+ }
+ }
+
+ maxOpenConns := data.Get("max_open_connections").(int)
+ if maxOpenConns == 0 {
+ maxOpenConns = 2
+ }
+
+ maxIdleConns := data.Get("max_idle_connections").(int)
+ if maxIdleConns == 0 {
+ maxIdleConns = maxOpenConns
+ }
+ if maxIdleConns > maxOpenConns {
+ maxIdleConns = maxOpenConns
+ }
+
+ // Don't check the connection_url if verification is disabled
+ verifyConnection := data.Get("verify_connection").(bool)
+ if verifyConnection {
+ // Verify the string
+ db, err := sql.Open("postgres", connURL)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error validating connection info: %s", err)), nil
+ }
+ defer db.Close()
+ if err := db.Ping(); err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error validating connection info: %s", err)), nil
+ }
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{
+ ConnectionString: connValue,
+ ConnectionURL: connURL,
+ MaxOpenConnections: maxOpenConns,
+ MaxIdleConnections: maxIdleConns,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ // Reset the DB connection
+ b.ResetDB()
+
+ resp := &logical.Response{}
+ resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection string or URL as it is, including passwords, if any.")
+
+ return resp, nil
+}
+
+type connectionConfig struct {
+ ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"`
+ // Deprecate "value" in coming releases
+ ConnectionString string `json:"value" structs:"value" mapstructure:"value"`
+ MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"`
+ MaxIdleConnections int `json:"max_idle_connections" structs:"max_idle_connections" mapstructure:"max_idle_connections"`
+}
+
+const pathConfigConnectionHelpSyn = `
+Configure the connection string to talk to PostgreSQL.
+`
+
+const pathConfigConnectionHelpDesc = `
+This path configures the connection string used to connect to PostgreSQL.
+The value of the string can be a URL, or a PG style string in the
+format of "user=foo host=bar" etc.
+
+The URL looks like:
+"postgresql://user:pass@host:port/dbname"
+
+When configuring the connection string, the backend will verify its validity.
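+
+Example (illustrative; assumes the backend is mounted at "postgresql/"):
+
+    vault write postgresql/config/connection \
+        connection_url="postgresql://user:pass@localhost:5432/dbname"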
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_config_lease.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_config_lease.go
new file mode 100644
index 0000000..4bc55c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_config_lease.go
@@ -0,0 +1,103 @@
+package postgresql
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigLease(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/lease",
+ Fields: map[string]*framework.FieldSchema{
+ "lease": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Default lease for roles.",
+ },
+
+ "lease_max": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Maximum time a credential is valid for.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathLeaseRead,
+ logical.UpdateOperation: b.pathLeaseWrite,
+ },
+
+ HelpSynopsis: pathConfigLeaseHelpSyn,
+ HelpDescription: pathConfigLeaseHelpDesc,
+ }
+}
+
+func (b *backend) pathLeaseWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ leaseRaw := d.Get("lease").(string)
+ leaseMaxRaw := d.Get("lease_max").(string)
+
+ lease, err := time.ParseDuration(leaseRaw)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Invalid lease: %s", err)), nil
+ }
+ leaseMax, err := time.ParseDuration(leaseMaxRaw)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+			"Invalid lease_max: %s", err)), nil
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("config/lease", &configLease{
+ Lease: lease,
+ LeaseMax: leaseMax,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathLeaseRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	lease, err := b.Lease(req.Storage)
+	if err != nil {
+ return nil, err
+ }
+ if lease == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "lease": lease.Lease.String(),
+ "lease_max": lease.LeaseMax.String(),
+ },
+ }, nil
+}
+
+type configLease struct {
+ Lease time.Duration
+ LeaseMax time.Duration
+}
+
+const pathConfigLeaseHelpSyn = `
+Configure the default lease information for generated credentials.
+`
+
+const pathConfigLeaseHelpDesc = `
+This configures the default lease information used for credentials
+generated by this backend. The lease specifies the duration that a
+credential will be valid for, as well as the maximum session for
+a set of credentials.
+
+The lease is specified as a duration string: an integer followed by a unit,
+such as "1h". The largest supported unit is the hour.
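+
+Example (illustrative; assumes the backend is mounted at "postgresql/"):
+
+    vault write postgresql/config/lease lease=1h lease_max=24h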
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_role_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_role_create.go
new file mode 100644
index 0000000..5ca92c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_role_create.go
@@ -0,0 +1,157 @@
+package postgresql
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ _ "github.com/lib/pq"
+)
+
+func pathRoleCreate(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "creds/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRoleCreateRead,
+ },
+
+ HelpSynopsis: pathRoleCreateReadHelpSyn,
+ HelpDescription: pathRoleCreateReadHelpDesc,
+ }
+}
+
+func (b *backend) pathRoleCreateRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.logger.Trace("postgres/pathRoleCreateRead: enter")
+ defer b.logger.Trace("postgres/pathRoleCreateRead: exit")
+
+ name := data.Get("name").(string)
+
+ // Get the role
+ b.logger.Trace("postgres/pathRoleCreateRead: getting role")
+ role, err := b.Role(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
+ }
+
+ // Determine if we have a lease
+ b.logger.Trace("postgres/pathRoleCreateRead: getting lease")
+ lease, err := b.Lease(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+	// Unlike some other backends we need a lease here (we can't leave it as 0
+	// and let core fill it in) because PostgreSQL also expires users as a
+	// safety measure, so the lease cannot be zero
+ if lease == nil {
+ lease = &configLease{
+ Lease: b.System().DefaultLeaseTTL(),
+ }
+ }
+
+	// Generate the username, password and expiration. PostgreSQL limits
+	// usernames to 63 characters, so truncate the display name to 26
+	// characters: 26 + 1 (hyphen) + 36 (UUID) = 63.
+ displayName := req.DisplayName
+ if len(displayName) > 26 {
+ displayName = displayName[:26]
+ }
+ userUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+ username := fmt.Sprintf("%s-%s", displayName, userUUID)
+ if len(username) > 63 {
+ username = username[:63]
+ }
+ password, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+ expiration := time.Now().
+ Add(lease.Lease).
+ Format("2006-01-02 15:04:05-0700")
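+	// For illustration (example values, not real output): a DisplayName of
+	// "token" and a 1h lease yield a username like "token-<uuid>" capped at
+	// 63 characters and an expiration such as "2018-01-02 15:04:05+0000".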
+
+ // Get our handle
+ b.logger.Trace("postgres/pathRoleCreateRead: getting database handle")
+ db, err := b.DB(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Start a transaction
+ b.logger.Trace("postgres/pathRoleCreateRead: starting transaction")
+ tx, err := db.Begin()
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ b.logger.Trace("postgres/pathRoleCreateRead: rolling back transaction")
+ tx.Rollback()
+ }()
+
+ // Execute each query
+ for _, query := range strutil.ParseArbitraryStringSlice(role.SQL, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ b.logger.Trace("postgres/pathRoleCreateRead: preparing statement")
+ stmt, err := tx.Prepare(Query(query, map[string]string{
+ "name": username,
+ "password": password,
+ "expiration": expiration,
+ }))
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+ b.logger.Trace("postgres/pathRoleCreateRead: executing statement")
+ if _, err := stmt.Exec(); err != nil {
+ return nil, err
+ }
+ }
+
+	// Commit the transaction
+	b.logger.Trace("postgres/pathRoleCreateRead: committing transaction")
+ if err := tx.Commit(); err != nil {
+ return nil, err
+ }
+
+	// Return the secret
+	b.logger.Trace("postgres/pathRoleCreateRead: generating secret")
+ resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
+ "username": username,
+ "password": password,
+ }, map[string]interface{}{
+ "username": username,
+ "role": name,
+ })
+ resp.Secret.TTL = lease.Lease
+ return resp, nil
+}
+
+const pathRoleCreateReadHelpSyn = `
+Request database credentials for a certain role.
+`
+
+const pathRoleCreateReadHelpDesc = `
+This path reads database credentials for a certain role. The
+database credentials will be generated on demand and will be automatically
+revoked when the lease is up.
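+
+Example (illustrative; assumes the backend is mounted at "postgresql/" and a
+role named "readonly" has been created):
+
+    vault read postgresql/creds/readonly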
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_roles.go
new file mode 100644
index 0000000..dc0aaf0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_roles.go
@@ -0,0 +1,200 @@
+package postgresql
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathListRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList,
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func pathRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": {
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+
+ "sql": {
+ Type: framework.TypeString,
+ Description: "SQL string to create a user. See help for more info.",
+ },
+
+ "revocation_sql": {
+ Type: framework.TypeString,
+ Description: `SQL statements to be executed to revoke a user. Must be a semicolon-separated
+string, a base64-encoded semicolon-separated string, a serialized JSON string
+array, or a base64-encoded serialized JSON string array. The '{{name}}' value
+will be substituted.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRoleRead,
+ logical.UpdateOperation: b.pathRoleCreate,
+ logical.DeleteOperation: b.pathRoleDelete,
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
+ entry, err := s.Get("role/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result roleEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathRoleDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("role/" + data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathRoleRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ role, err := b.Role(req.Storage, data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "sql": role.SQL,
+ "revocation_sql": role.RevocationSQL,
+ },
+ }, nil
+}
+
+func (b *backend) pathRoleList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entries, err := req.Storage.List("role/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(entries), nil
+}
+
+func (b *backend) pathRoleCreate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+ sql := data.Get("sql").(string)
+
+ // Get our connection
+ db, err := b.DB(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Test the query by trying to prepare it
+ for _, query := range strutil.ParseArbitraryStringSlice(sql, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := db.Prepare(Query(query, map[string]string{
+ "name": "foo",
+ "password": "bar",
+ "expiration": "",
+ }))
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error testing query: %s", err)), nil
+ }
+ stmt.Close()
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
+ SQL: sql,
+ RevocationSQL: data.Get("revocation_sql").(string),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+type roleEntry struct {
+ SQL string `json:"sql" mapstructure:"sql" structs:"sql"`
+ RevocationSQL string `json:"revocation_sql" mapstructure:"revocation_sql" structs:"revocation_sql"`
+}
+
+const pathRoleHelpSyn = `
+Manage the roles that can be created with this backend.
+`
+
+const pathRoleHelpDesc = `
+This path lets you manage the roles that can be created with this backend.
+
+The "sql" parameter customizes the SQL string used to create the role.
+This can be a sequence of SQL queries. Some substitution will be done to the
+SQL string for certain keys. The names of the variables must be surrounded
+by "{{" and "}}" to be replaced.
+
+ * "name" - The random username generated for the DB user.
+
+ * "password" - The random password generated for the DB user.
+
+ * "expiration" - The timestamp when this user will expire.
+
+Example of a decent SQL query to use:
+
+ CREATE ROLE "{{name}}" WITH
+ LOGIN
+ PASSWORD '{{password}}'
+ VALID UNTIL '{{expiration}}';
+ GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
+
+Note the above user would be able to access everything in schema public.
+For more complex GRANT clauses, see the PostgreSQL manual.
+
+The "revocation_sql" parameter customizes the SQL string used to revoke a user.
+Example of a decent revocation SQL query to use:
+
+ REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}};
+ REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}};
+ REVOKE USAGE ON SCHEMA public FROM {{name}};
+ DROP ROLE IF EXISTS {{name}};
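+
+Example (illustrative; assumes the backend is mounted at "postgresql/" and the
+statements above are stored in role.sql and revocation.sql):
+
+    vault write postgresql/roles/web sql=@role.sql revocation_sql=@revocation.sql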
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/query.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/query.go
new file mode 100644
index 0000000..e4f7f59
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/query.go
@@ -0,0 +1,15 @@
+package postgresql
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Query replaces each "{{key}}" placeholder in tpl with the corresponding
+// value from data.
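+//
+// For example (illustrative):
+//
+//	Query(`CREATE ROLE "{{name}}";`, map[string]string{"name": "u1"})
+//	// returns: CREATE ROLE "u1";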
+func Query(tpl string, data map[string]string) string {
+ for k, v := range data {
+ tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
+ }
+
+ return tpl
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go
new file mode 100644
index 0000000..535d1c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go
@@ -0,0 +1,270 @@
+package postgresql
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/lib/pq"
+)
+
+const SecretCredsType = "creds"
+
+func secretCreds(b *backend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretCredsType,
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username",
+ },
+
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Password",
+ },
+ },
+
+ Renew: b.secretCredsRenew,
+ Revoke: b.secretCredsRevoke,
+ }
+}
+
+func (b *backend) secretCredsRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Get the username from the internal data
+ usernameRaw, ok := req.Secret.InternalData["username"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing username internal data")
+ }
+	username, ok := usernameRaw.(string)
+	if !ok {
+		return nil, fmt.Errorf("username internal data is not a string")
+	}
+
+ // Get our connection
+ db, err := b.DB(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the lease information
+ lease, err := b.Lease(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if lease == nil {
+ lease = &configLease{}
+ }
+
+ f := framework.LeaseExtend(lease.Lease, lease.LeaseMax, b.System())
+ resp, err := f(req, d)
+ if err != nil {
+ return nil, err
+ }
+
+	// Make sure we push out the VALID UNTIL timestamp for this user.
+ if expireTime := resp.Secret.ExpirationTime(); !expireTime.IsZero() {
+ expiration := expireTime.Format("2006-01-02 15:04:05-0700")
+
+ query := fmt.Sprintf(
+ "ALTER ROLE %s VALID UNTIL '%s';",
+ pq.QuoteIdentifier(username),
+ expiration)
+ stmt, err := db.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return nil, err
+ }
+ }
+
+ return resp, nil
+}
+
+func (b *backend) secretCredsRevoke(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Get the username from the internal data
+ usernameRaw, ok := req.Secret.InternalData["username"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing username internal data")
+ }
+	username, ok := usernameRaw.(string)
+	if !ok {
+		return nil, fmt.Errorf("username internal data is not a string")
+	}
+
+ var revocationSQL string
+ var resp *logical.Response
+
+ roleNameRaw, ok := req.Secret.InternalData["role"]
+ if ok {
+ role, err := b.Role(req.Storage, roleNameRaw.(string))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ if resp == nil {
+ resp = &logical.Response{}
+ }
+ resp.AddWarning(fmt.Sprintf("Role %q cannot be found. Using default revocation SQL.", roleNameRaw.(string)))
+ } else {
+ revocationSQL = role.RevocationSQL
+ }
+ }
+
+ // Get our connection
+ db, err := b.DB(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ switch revocationSQL {
+
+	// The empty case covers the default revocation logic; custom revocation
+	// SQL, when provided, is executed as-is in the default case below.
+ case "":
+ // Check if the role exists
+ var exists bool
+ err = db.QueryRow("SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists)
+ if err != nil && err != sql.ErrNoRows {
+ return nil, err
+ }
+
+		if !exists {
+ return resp, nil
+ }
+
+ // Query for permissions; we need to revoke permissions before we can drop
+ // the role
+ // This isn't done in a transaction because even if we fail along the way,
+ // we want to remove as much access as possible
+ stmt, err := db.Prepare("SELECT DISTINCT table_schema FROM information_schema.role_column_grants WHERE grantee=$1;")
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+
+ rows, err := stmt.Query(username)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ const initialNumRevocations = 16
+ revocationStmts := make([]string, 0, initialNumRevocations)
+ for rows.Next() {
+ var schema string
+ err = rows.Scan(&schema)
+ if err != nil {
+ // keep going; remove as many permissions as possible right now
+ continue
+ }
+ revocationStmts = append(revocationStmts, fmt.Sprintf(
+ `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`,
+ pq.QuoteIdentifier(schema),
+ pq.QuoteIdentifier(username)))
+
+ revocationStmts = append(revocationStmts, fmt.Sprintf(
+ `REVOKE USAGE ON SCHEMA %s FROM %s;`,
+ pq.QuoteIdentifier(schema),
+ pq.QuoteIdentifier(username)))
+ }
+
+ // for good measure, revoke all privileges and usage on schema public
+ revocationStmts = append(revocationStmts, fmt.Sprintf(
+ `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM %s;`,
+ pq.QuoteIdentifier(username)))
+
+ revocationStmts = append(revocationStmts, fmt.Sprintf(
+ "REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM %s;",
+ pq.QuoteIdentifier(username)))
+
+ revocationStmts = append(revocationStmts, fmt.Sprintf(
+ "REVOKE USAGE ON SCHEMA public FROM %s;",
+ pq.QuoteIdentifier(username)))
+
+ // get the current database name so we can issue a REVOKE CONNECT for
+ // this username
+ var dbname sql.NullString
+ if err := db.QueryRow("SELECT current_database();").Scan(&dbname); err != nil {
+ return nil, err
+ }
+
+ if dbname.Valid {
+ revocationStmts = append(revocationStmts, fmt.Sprintf(
+ `REVOKE CONNECT ON DATABASE %s FROM %s;`,
+ pq.QuoteIdentifier(dbname.String),
+ pq.QuoteIdentifier(username)))
+ }
+
+ // again, here, we do not stop on error, as we want to remove as
+ // many permissions as possible right now
+ var lastStmtError error
+ for _, query := range revocationStmts {
+ stmt, err := db.Prepare(query)
+ if err != nil {
+ lastStmtError = err
+ continue
+ }
+ defer stmt.Close()
+ _, err = stmt.Exec()
+ if err != nil {
+ lastStmtError = err
+ }
+ }
+
+ // can't drop if not all privileges are revoked
+ if rows.Err() != nil {
+ return nil, fmt.Errorf("could not generate revocation statements for all rows: %s", rows.Err())
+ }
+ if lastStmtError != nil {
+ return nil, fmt.Errorf("could not perform all revocation statements: %s", lastStmtError)
+ }
+
+ // Drop this user
+ stmt, err = db.Prepare(fmt.Sprintf(
+ `DROP ROLE IF EXISTS %s;`, pq.QuoteIdentifier(username)))
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return nil, err
+ }
+
+ // We have revocation SQL, execute directly, within a transaction
+ default:
+ tx, err := db.Begin()
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ tx.Rollback()
+ }()
+
+ for _, query := range strutil.ParseArbitraryStringSlice(revocationSQL, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := tx.Prepare(Query(query, map[string]string{
+ "name": username,
+ }))
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+
+ if _, err := stmt.Exec(); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := tx.Commit(); err != nil {
+ return nil, err
+ }
+ }
+
+ return resp, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go
new file mode 100644
index 0000000..4f9cde0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go
@@ -0,0 +1,134 @@
+package rabbitmq
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/michaelklishin/rabbit-hole"
+)
+
+// Factory creates and configures the backend
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return Backend().Setup(conf)
+}
+
+// Backend creates a new backend with all the paths and secrets belonging to it
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(backendHelp),
+
+ Paths: []*framework.Path{
+ pathConfigConnection(&b),
+ pathConfigLease(&b),
+ pathListRoles(&b),
+ pathCreds(&b),
+ pathRoles(&b),
+ },
+
+ Secrets: []*framework.Secret{
+ secretCreds(&b),
+ },
+
+ Clean: b.resetClient,
+
+ Invalidate: b.invalidate,
+ }
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+
+ client *rabbithole.Client
+ lock sync.RWMutex
+}
+
+// Client returns the RabbitMQ management client, creating and caching it if needed.
+func (b *backend) Client(s logical.Storage) (*rabbithole.Client, error) {
+ b.lock.RLock()
+
+ // If we already have a client, return it
+ if b.client != nil {
+ b.lock.RUnlock()
+ return b.client, nil
+ }
+
+ b.lock.RUnlock()
+
+ // Otherwise, attempt to make connection
+ entry, err := s.Get("config/connection")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, fmt.Errorf("configure the client connection with config/connection first")
+ }
+
+ var connConfig connectionConfig
+ if err := entry.DecodeJSON(&connConfig); err != nil {
+ return nil, err
+ }
+
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+	// If the client was created while we were waiting for the write lock, return it
+ if b.client != nil {
+ return b.client, nil
+ }
+
+ b.client, err = rabbithole.NewClient(connConfig.URI, connConfig.Username, connConfig.Password)
+ if err != nil {
+ return nil, err
+ }
+	// Use a default pooled transport so that file descriptors are not leaked
+ b.client.SetTransport(cleanhttp.DefaultPooledTransport())
+
+ return b.client, nil
+}
+
+// resetClient forces a connection next time Client() is called.
+func (b *backend) resetClient() {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ b.client = nil
+}
+
+func (b *backend) invalidate(key string) {
+ switch key {
+ case "config/connection":
+ b.resetClient()
+ }
+}
+
+// Lease returns the lease information
+func (b *backend) Lease(s logical.Storage) (*configLease, error) {
+ entry, err := s.Get("config/lease")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result configLease
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+const backendHelp = `
+The RabbitMQ backend dynamically generates RabbitMQ users.
+
+After mounting this backend, configure it using the endpoints within
+the "config/" path.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend_test.go
new file mode 100644
index 0000000..41a45eb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend_test.go
@@ -0,0 +1,218 @@
+package rabbitmq
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/michaelklishin/rabbit-hole"
+ "github.com/mitchellh/mapstructure"
+)
+
+// Set the following env vars for the below test case to work.
+//
+// RABBITMQ_CONNECTION_URI
+// RABBITMQ_USERNAME
+// RABBITMQ_PASSWORD
+func TestBackend_basic(t *testing.T) {
+ if os.Getenv(logicaltest.TestEnvVar) == "" {
+		t.Skipf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar)
+ return
+ }
+ b, _ := Factory(logical.TestBackendConfig())
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t),
+ testAccStepRole(t),
+ testAccStepReadCreds(t, b, "web"),
+ },
+ })
+}
+
+func TestBackend_roleCrud(t *testing.T) {
+ if os.Getenv(logicaltest.TestEnvVar) == "" {
+		t.Skipf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar)
+ return
+ }
+ b, _ := Factory(logical.TestBackendConfig())
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepConfig(t),
+ testAccStepRole(t),
+ testAccStepReadRole(t, "web", "administrator", `{"/": {"configure": ".*", "write": ".*", "read": ".*"}}`),
+ testAccStepDeleteRole(t, "web"),
+ testAccStepReadRole(t, "web", "", ""),
+ },
+ })
+}
+
+const (
+ envRabbitMQConnectionURI = "RABBITMQ_CONNECTION_URI"
+ envRabbitMQUsername = "RABBITMQ_USERNAME"
+ envRabbitMQPassword = "RABBITMQ_PASSWORD"
+)
+
+func testAccPreCheck(t *testing.T) {
+	if uri := os.Getenv(envRabbitMQConnectionURI); uri == "" {
+		t.Fatalf("%s must be set for acceptance tests", envRabbitMQConnectionURI)
+	}
+	if username := os.Getenv(envRabbitMQUsername); username == "" {
+		t.Fatalf("%s must be set for acceptance tests", envRabbitMQUsername)
+	}
+	if password := os.Getenv(envRabbitMQPassword); password == "" {
+		t.Fatalf("%s must be set for acceptance tests", envRabbitMQPassword)
+	}
+}
+
+func testAccStepConfig(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/connection",
+ Data: map[string]interface{}{
+ "connection_uri": os.Getenv(envRabbitMQConnectionURI),
+ "username": os.Getenv(envRabbitMQUsername),
+ "password": os.Getenv(envRabbitMQPassword),
+ },
+ }
+}
+
+func testAccStepRole(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/web",
+ Data: map[string]interface{}{
+ "tags": "administrator",
+ "vhosts": `{"/": {"configure": ".*", "write": ".*", "read": ".*"}}`,
+ },
+ }
+}
+
+func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "roles/" + n,
+ }
+}
+
+func testAccStepReadCreds(t *testing.T, b logical.Backend, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "creds/" + name,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Username string `mapstructure:"username"`
+ Password string `mapstructure:"password"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ log.Printf("[WARN] Generated credentials: %v", d)
+
+ uri := os.Getenv(envRabbitMQConnectionURI)
+
+ client, err := rabbithole.NewClient(uri, d.Username, d.Password)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.ListVhosts()
+ if err != nil {
+ t.Fatalf("unable to list vhosts with generated credentials: %s", err)
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.RevokeOperation,
+ Secret: &logical.Secret{
+ InternalData: map[string]interface{}{
+ "secret_type": "creds",
+ "username": d.Username,
+ },
+ },
+ })
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.IsError() {
+ return fmt.Errorf("Error on resp: %#v", *resp)
+ }
+ }
+
+ client, err = rabbithole.NewClient(uri, d.Username, d.Password)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.ListVhosts()
+ if err == nil {
+		t.Fatal("expected listing vhosts with revoked credentials to fail")
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepReadRole(t *testing.T, name, tags, rawVHosts string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "roles/" + name,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if tags == "" && rawVHosts == "" {
+ return nil
+ }
+
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var d struct {
+ Tags string `mapstructure:"tags"`
+ VHosts map[string]vhostPermission `mapstructure:"vhosts"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.Tags != tags {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var vhosts map[string]vhostPermission
+ if err := jsonutil.DecodeJSON([]byte(rawVHosts), &vhosts); err != nil {
+ return fmt.Errorf("bad expected vhosts %#v: %s", vhosts, err)
+ }
+
+ for host, permission := range vhosts {
+ actualPermission, ok := d.VHosts[host]
+ if !ok {
+ return fmt.Errorf("expected vhost: %s", host)
+ }
+
+ if actualPermission.Configure != permission.Configure {
+ return fmt.Errorf("expected permission %s to be %s, got %s", "configure", permission.Configure, actualPermission.Configure)
+ }
+
+ if actualPermission.Write != permission.Write {
+ return fmt.Errorf("expected permission %s to be %s, got %s", "write", permission.Write, actualPermission.Write)
+ }
+
+ if actualPermission.Read != permission.Read {
+ return fmt.Errorf("expected permission %s to be %s, got %s", "read", permission.Read, actualPermission.Read)
+ }
+ }
+
+ return nil
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_connection.go
new file mode 100644
index 0000000..53f392c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_connection.go
@@ -0,0 +1,118 @@
+package rabbitmq
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/michaelklishin/rabbit-hole"
+)
+
+func pathConfigConnection(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/connection",
+ Fields: map[string]*framework.FieldSchema{
+ "connection_uri": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "RabbitMQ Management URI",
+ },
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username of a RabbitMQ management administrator",
+ },
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Password of the provided RabbitMQ management user",
+ },
+ "verify_connection": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `If set, connection_uri is verified by actually connecting to the RabbitMQ management API`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathConnectionUpdate,
+ },
+
+ HelpSynopsis: pathConfigConnectionHelpSyn,
+ HelpDescription: pathConfigConnectionHelpDesc,
+ }
+}
+
+func (b *backend) pathConnectionUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ uri := data.Get("connection_uri").(string)
+ if uri == "" {
+ return logical.ErrorResponse("missing connection_uri"), nil
+ }
+
+ username := data.Get("username").(string)
+ if username == "" {
+ return logical.ErrorResponse("missing username"), nil
+ }
+
+ password := data.Get("password").(string)
+ if password == "" {
+ return logical.ErrorResponse("missing password"), nil
+ }
+
+ // Don't check the connection_url if verification is disabled
+ verifyConnection := data.Get("verify_connection").(bool)
+ if verifyConnection {
+ // Create RabbitMQ management client
+ client, err := rabbithole.NewClient(uri, username, password)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create client: %s", err)
+ }
+
+	// Verify that the configured credentials are capable of listing users
+ if _, err = client.ListUsers(); err != nil {
+ return nil, fmt.Errorf("failed to validate the connection: %s", err)
+ }
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{
+ URI: uri,
+ Username: username,
+ Password: password,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ // Reset the client connection
+ b.resetClient()
+
+ return nil, nil
+}
+
+// connectionConfig contains the information required to make a connection to a RabbitMQ node
+type connectionConfig struct {
+ // URI of the RabbitMQ server
+ URI string `json:"connection_uri"`
+
+ // Username which has 'administrator' tag attached to it
+ Username string `json:"username"`
+
+ // Password for the Username
+ Password string `json:"password"`
+}
+
+const pathConfigConnectionHelpSyn = `
+Configure the connection URI, username, and password to talk to RabbitMQ management HTTP API.
+`
+
+const pathConfigConnectionHelpDesc = `
+This path configures the connection properties used to connect to the RabbitMQ
+management HTTP API. The "connection_uri" parameter is the address of the API,
+and the "username" and "password" parameters are the credentials used against
+it. When the boolean "verify_connection" parameter is set, the provided URI
+and credentials are validated by connecting to the API.
+
+The URI looks like:
+"http://localhost:15672"
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease.go
new file mode 100644
index 0000000..25a9f48
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease.go
@@ -0,0 +1,83 @@
+package rabbitmq
+
+import (
+ "time"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathConfigLease(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/lease",
+ Fields: map[string]*framework.FieldSchema{
+ "ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 0,
+				Description: "Duration before which the issued credentials need renewal",
+ },
+ "max_ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 0,
+ Description: `Duration after which the issued credentials should not be allowed to be renewed`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathLeaseRead,
+ logical.UpdateOperation: b.pathLeaseUpdate,
+ },
+
+ HelpSynopsis: pathConfigLeaseHelpSyn,
+ HelpDescription: pathConfigLeaseHelpDesc,
+ }
+}
+
+// Sets the lease configuration parameters
+func (b *backend) pathLeaseUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entry, err := logical.StorageEntryJSON("config/lease", &configLease{
+ TTL: time.Second * time.Duration(d.Get("ttl").(int)),
+ MaxTTL: time.Second * time.Duration(d.Get("max_ttl").(int)),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// Returns the lease configuration parameters
+func (b *backend) pathLeaseRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ lease, err := b.Lease(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if lease == nil {
+ return nil, nil
+ }
+
+ lease.TTL = lease.TTL / time.Second
+ lease.MaxTTL = lease.MaxTTL / time.Second
+
+ return &logical.Response{
+ Data: structs.New(lease).Map(),
+ }, nil
+}
+
+// Lease configuration information for the secrets issued by this backend
+type configLease struct {
+ TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
+ MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
+}
+
+var pathConfigLeaseHelpSyn = "Configure the lease parameters for generated credentials"
+
+var pathConfigLeaseHelpDesc = `
+Sets the ttl and max_ttl values for the secrets issued by this backend.
+Both ttl and max_ttl accept an integer number of seconds as well as duration
+strings like "1h".
+`
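+
+// An illustrative sketch (not part of the original file) of tuning the lease
+// configuration with the Vault API client; the mount path "rabbitmq" and the
+// durations are placeholders:
+//
+//	_, err := client.Logical().Write("rabbitmq/config/lease", map[string]interface{}{
+//		"ttl":     "1h",
+//		"max_ttl": "24h",
+//	})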
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go
new file mode 100644
index 0000000..a5c9983
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go
@@ -0,0 +1,53 @@
+package rabbitmq
+
+import (
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestBackend_config_lease_RU(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b := Backend()
+ if _, err = b.Setup(config); err != nil {
+ t.Fatal(err)
+ }
+
+ configData := map[string]interface{}{
+ "ttl": "10h",
+ "max_ttl": "20h",
+ }
+ configReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/lease",
+ Storage: config.StorageView,
+ Data: configData,
+ }
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: resp: %#v\nerr:%s", resp, err)
+ }
+ if resp != nil {
+ t.Fatal("expected a nil response")
+ }
+
+ configReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: resp: %#v\nerr:%s", resp, err)
+ }
+ if resp == nil {
+ t.Fatal("expected a response")
+ }
+
+ if resp.Data["ttl"].(time.Duration) != 36000 {
+ t.Fatalf("bad: ttl: expected:36000 actual:%d", resp.Data["ttl"].(time.Duration))
+ }
+ if resp.Data["max_ttl"].(time.Duration) != 72000 {
+ t.Fatalf("bad: ttl: expected:72000 actual:%d", resp.Data["ttl"].(time.Duration))
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_role_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_role_create.go
new file mode 100644
index 0000000..240eb2a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_role_create.go
@@ -0,0 +1,120 @@
+package rabbitmq
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/michaelklishin/rabbit-hole"
+)
+
+func pathCreds(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "creds/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathCredsRead,
+ },
+
+ HelpSynopsis: pathRoleCreateReadHelpSyn,
+ HelpDescription: pathRoleCreateReadHelpDesc,
+ }
+}
+
+// Issues the credential based on the role name
+func (b *backend) pathCredsRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ if name == "" {
+ return logical.ErrorResponse("missing name"), nil
+ }
+
+ // Get the role
+ role, err := b.Role(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
+ }
+
+ // Ensure username is unique
+ uuidVal, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+ username := fmt.Sprintf("%s-%s", req.DisplayName, uuidVal)
+
+ password, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the client configuration
+ client, err := b.Client(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if client == nil {
+ return logical.ErrorResponse("failed to get the client"), nil
+ }
+
+ // Register the generated credentials in the backend, with the RabbitMQ server
+ if _, err = client.PutUser(username, rabbithole.UserSettings{
+ Password: password,
+ Tags: role.Tags,
+ }); err != nil {
+ return nil, fmt.Errorf("failed to create a new user with the generated credentials")
+ }
+
+ // If the role had vhost permissions specified, assign those permissions
+ // to the created username for respective vhosts.
+ for vhost, permission := range role.VHosts {
+ if _, err := client.UpdatePermissionsIn(vhost, username, rabbithole.Permissions{
+ Configure: permission.Configure,
+ Write: permission.Write,
+ Read: permission.Read,
+ }); err != nil {
+ // Delete the user because it's in an unknown state
+ if _, rmErr := client.DeleteUser(username); rmErr != nil {
+ return nil, fmt.Errorf("failed to delete user:%s, err: %s. %s", username, err, rmErr)
+ }
+ return nil, fmt.Errorf("failed to update permissions to the %s user. err:%s", username, err)
+ }
+ }
+
+ // Return the secret
+ resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
+ "username": username,
+ "password": password,
+ }, map[string]interface{}{
+ "username": username,
+ })
+
+ // Determine if we have a lease
+ lease, err := b.Lease(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if lease != nil {
+ resp.Secret.TTL = lease.TTL
+ }
+
+ return resp, nil
+}
+
+const pathRoleCreateReadHelpSyn = `
+Request RabbitMQ credentials for a certain role.
+`
+
+const pathRoleCreateReadHelpDesc = `
+This path reads RabbitMQ credentials for a certain role. The
+RabbitMQ credentials will be generated on demand and will be automatically
+revoked when the lease is up.
+`
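+
+// An illustrative sketch (not part of the original file) of requesting
+// credentials for a role named "myrole" (a placeholder) with the Vault API
+// client; the username and password come back in the secret's data:
+//
+//	secret, err := client.Logical().Read("rabbitmq/creds/myrole")
+//	if err == nil && secret != nil {
+//		username := secret.Data["username"].(string)
+//		password := secret.Data["password"].(string)
+//	}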
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_roles.go
new file mode 100644
index 0000000..bb03d3d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_roles.go
@@ -0,0 +1,181 @@
+package rabbitmq
+
+import (
+ "fmt"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathListRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/?$",
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList,
+ },
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func pathRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ "tags": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Comma-separated list of tags for this role.",
+ },
+ "vhosts": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "A map of virtual hosts to permissions.",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRoleRead,
+ logical.UpdateOperation: b.pathRoleUpdate,
+ logical.DeleteOperation: b.pathRoleDelete,
+ },
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+// Reads the role configuration from the storage
+func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
+ entry, err := s.Get("role/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result roleEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+// Deletes an existing role
+func (b *backend) pathRoleDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ if name == "" {
+ return logical.ErrorResponse("missing name"), nil
+ }
+
+ return nil, req.Storage.Delete("role/" + name)
+}
+
+// Reads an existing role
+func (b *backend) pathRoleRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ if name == "" {
+ return logical.ErrorResponse("missing name"), nil
+ }
+
+ role, err := b.Role(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: structs.New(role).Map(),
+ }, nil
+}
+
+// Lists all the roles registered with the backend
+func (b *backend) pathRoleList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ roles, err := req.Storage.List("role/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(roles), nil
+}
+
+// Registers a new role with the backend
+func (b *backend) pathRoleUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ if name == "" {
+ return logical.ErrorResponse("missing name"), nil
+ }
+
+ tags := d.Get("tags").(string)
+ rawVHosts := d.Get("vhosts").(string)
+
+ if tags == "" && rawVHosts == "" {
+ return logical.ErrorResponse("both tags and vhosts not specified"), nil
+ }
+
+ var vhosts map[string]vhostPermission
+ if len(rawVHosts) > 0 {
+ if err := jsonutil.DecodeJSON([]byte(rawVHosts), &vhosts); err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("failed to unmarshal vhosts: %s", err)), nil
+ }
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
+ Tags: tags,
+ VHosts: vhosts,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// Role that defines the capabilities of the credentials issued against it
+type roleEntry struct {
+ Tags string `json:"tags" structs:"tags" mapstructure:"tags"`
+ VHosts map[string]vhostPermission `json:"vhosts" structs:"vhosts" mapstructure:"vhosts"`
+}
+
+// Structure representing the permissions of a vhost
+type vhostPermission struct {
+ Configure string `json:"configure" structs:"configure" mapstructure:"configure"`
+ Write string `json:"write" structs:"write" mapstructure:"write"`
+ Read string `json:"read" structs:"read" mapstructure:"read"`
+}
+
+const pathRoleHelpSyn = `
+Manage the roles that can be created with this backend.
+`
+
+const pathRoleHelpDesc = `
+This path lets you manage the roles that can be created with this backend.
+
+The "tags" parameter customizes the tags used to create the role.
+This is a comma-separated list of strings. The "vhosts" parameter customizes
+the virtual hosts that this user will be associated with. This is a JSON object
+passed as a string in the form:
+{
+ "vhostOne": {
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ "vhostTwo": {
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+}
+`
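+
+// An illustrative sketch (not part of the original file) of registering a
+// role; the role name and vhost are placeholders, and the permission values
+// are RabbitMQ permission regular expressions:
+//
+//	_, err := client.Logical().Write("rabbitmq/roles/myrole", map[string]interface{}{
+//		"tags":   "administrator",
+//		"vhosts": `{"/": {"configure": ".*", "write": ".*", "read": ".*"}}`,
+//	})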
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/secret_creds.go
new file mode 100644
index 0000000..f59bb1a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/secret_creds.go
@@ -0,0 +1,67 @@
+package rabbitmq
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// SecretCredsType is the key for this backend's secrets.
+const SecretCredsType = "creds"
+
+func secretCreds(b *backend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretCredsType,
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "RabbitMQ username",
+ },
+ "password": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Password for the RabbitMQ username",
+ },
+ },
+ Renew: b.secretCredsRenew,
+ Revoke: b.secretCredsRevoke,
+ }
+}
+
+// Renew the previously issued secret
+func (b *backend) secretCredsRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Get the lease information
+ lease, err := b.Lease(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if lease == nil {
+ lease = &configLease{}
+ }
+
+ return framework.LeaseExtend(lease.TTL, lease.MaxTTL, b.System())(req, d)
+}
+
+// Revoke the previously issued secret
+func (b *backend) secretCredsRevoke(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Get the username from the internal data
+ usernameRaw, ok := req.Secret.InternalData["username"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing username internal data")
+ }
+ username := usernameRaw.(string)
+
+ // Get our connection
+ client, err := b.Client(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err = client.DeleteUser(username); err != nil {
+ return nil, fmt.Errorf("could not delete user: %s", err)
+ }
+
+ return nil, nil
+}
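+
+// An illustrative sketch (not part of the original file) of renewing and then
+// revoking an issued credential by its lease ID with the Vault API client;
+// the lease ID is a placeholder:
+//
+//	_, err := client.Sys().Renew("rabbitmq/creds/myrole/<lease_id>", 3600)
+//	err = client.Sys().Revoke("rabbitmq/creds/myrole/<lease_id>")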
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go
new file mode 100644
index 0000000..dcfb00d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go
@@ -0,0 +1,91 @@
+package ssh
+
+import (
+ "strings"
+
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+type backend struct {
+ *framework.Backend
+ view logical.Storage
+ salt *salt.Salt
+}
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ b, err := Backend(conf)
+ if err != nil {
+ return nil, err
+ }
+ return b.Setup(conf)
+}
+
+func Backend(conf *logical.BackendConfig) (*backend, error) {
+ var b backend
+ b.view = conf.StorageView
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(backendHelp),
+
+ PathsSpecial: &logical.Paths{
+ Unauthenticated: []string{
+ "verify",
+ "public_key",
+ },
+
+ LocalStorage: []string{
+ "otp/",
+ },
+ },
+
+ Paths: []*framework.Path{
+ pathConfigZeroAddress(&b),
+ pathKeys(&b),
+ pathListRoles(&b),
+ pathRoles(&b),
+ pathCredsCreate(&b),
+ pathLookup(&b),
+ pathVerify(&b),
+ pathConfigCA(&b),
+ pathSign(&b),
+ pathFetchPublicKey(&b),
+ },
+
+ Secrets: []*framework.Secret{
+ secretDynamicKey(&b),
+ secretOTP(&b),
+ },
+
+ Init: b.Initialize,
+ }
+ return &b, nil
+}
+
+func (b *backend) Initialize() error {
+ salt, err := salt.NewSalt(b.view, &salt.Config{
+ HashFunc: salt.SHA256Hash,
+ })
+ if err != nil {
+ return err
+ }
+ b.salt = salt
+ return nil
+}
+
+const backendHelp = `
+The SSH backend generates credentials allowing clients to establish SSH
+connections to remote hosts.
+
+There are three variants of the backend, which generate different types of
+credentials: dynamic keys, One-Time Passwords (OTPs), and CA-signed
+certificates. The desired behavior is role-specific and chosen at role
+creation time with the 'key_type' parameter.
+
+Please see the backend documentation for a thorough description of all three
+types. The Vault team strongly recommends the OTP type.
+
+After mounting this backend, before generating credentials, configure the
+backend's lease behavior using the 'config/lease' endpoint and create roles
+using the 'roles/' endpoint.
+`
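+
+// An illustrative sketch (not part of the original file) of mounting this
+// backend at the path "ssh" with the Vault API client:
+//
+//	err := client.Sys().Mount("ssh", &api.MountInput{
+//		Type: "ssh",
+//	})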
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go
new file mode 100644
index 0000000..538455c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go
@@ -0,0 +1,964 @@
+package ssh
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+
+ "golang.org/x/crypto/ssh"
+
+ "encoding/base64"
+ "errors"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/mapstructure"
+)
+
+// Before the following tests are run, a user named 'vaultssh' has to be
+// created and its ~/.ssh/authorized_keys file must contain the key below.
+//
+// ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9i+hFxZHGo6KblVme4zrAcJstR6I0PTJozW286X4WyvPnkMYDQ5mnhEYC7UWCvjoTWbPEXPX7NjhRtwQTGD67bV+lrxgfyzK1JZbUXK4PwgKJvQD+XyyWYMzDgGSQY61KUSqCxymSm/9NZkPU3ElaQ9xQuTzPpztM4ROfb8f2Yv6/ZESZsTo0MTAkp8Pcy+WkioI/uJ1H7zqs0EA4OMY4aDJRu0UtP4rTVeYNEAuRXdX+eH4aW3KMvhzpFTjMbaJHJXlEeUm2SaX5TNQyTOvghCeQILfYIL/Ca2ij8iwCmulwdV6eQGfd4VDu40PvSnmfoaE38o6HaPnX0kUcnKiT
+
+const (
+ testIP = "127.0.0.1"
+ testUserName = "vaultssh"
+ testAdminUser = "vaultssh"
+ testOTPKeyType = "otp"
+ testDynamicKeyType = "dynamic"
+ testCIDRList = "127.0.0.1/32"
+ testDynamicRoleName = "testDynamicRoleName"
+ testOTPRoleName = "testOTPRoleName"
+ testKeyName = "testKeyName"
+ testSharedPrivateKey = `
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAvYvoRcWRxqOim5VZnuM6wHCbLUeiND0yaM1tvOl+Fsrz55DG
+A0OZp4RGAu1Fgr46E1mzxFz1+zY4UbcEExg+u21fpa8YH8sytSWW1FyuD8ICib0A
+/l8slmDMw4BkkGOtSlEqgscpkpv/TWZD1NxJWkPcULk8z6c7TOETn2/H9mL+v2RE
+mbE6NDEwJKfD3MvlpIqCP7idR+86rNBAODjGOGgyUbtFLT+K01XmDRALkV3V/nh+
+GltyjL4c6RU4zG2iRyV5RHlJtkml+UzUMkzr4IQnkCC32CC/wmtoo/IsAprpcHVe
+nkBn3eFQ7uND70p5n6GhN/KOh2j519JFHJyokwIDAQABAoIBAHX7VOvBC3kCN9/x
++aPdup84OE7Z7MvpX6w+WlUhXVugnmsAAVDczhKoUc/WktLLx2huCGhsmKvyVuH+
+MioUiE+vx75gm3qGx5xbtmOfALVMRLopjCnJYf6EaFA0ZeQ+NwowNW7Lu0PHmAU8
+Z3JiX8IwxTz14DU82buDyewO7v+cEr97AnERe3PUcSTDoUXNaoNxjNpEJkKREY6h
+4hAY676RT/GsRcQ8tqe/rnCqPHNd7JGqL+207FK4tJw7daoBjQyijWuB7K5chSal
+oPInylM6b13ASXuOAOT/2uSUBWmFVCZPDCmnZxy2SdnJGbsJAMl7Ma3MUlaGvVI+
+Tfh1aQkCgYEA4JlNOabTb3z42wz6mz+Nz3JRwbawD+PJXOk5JsSnV7DtPtfgkK9y
+6FTQdhnozGWShAvJvc+C4QAihs9AlHXoaBY5bEU7R/8UK/pSqwzam+MmxmhVDV7G
+IMQPV0FteoXTaJSikhZ88mETTegI2mik+zleBpVxvfdhE5TR+lq8Br0CgYEA2AwJ
+CUD5CYUSj09PluR0HHqamWOrJkKPFPwa+5eiTTCzfBBxImYZh7nXnWuoviXC0sg2
+AuvCW+uZ48ygv/D8gcz3j1JfbErKZJuV+TotK9rRtNIF5Ub7qysP7UjyI7zCssVM
+kuDd9LfRXaB/qGAHNkcDA8NxmHW3gpln4CFdSY8CgYANs4xwfercHEWaJ1qKagAe
+rZyrMpffAEhicJ/Z65lB0jtG4CiE6w8ZeUMWUVJQVcnwYD+4YpZbX4S7sJ0B8Ydy
+AhkSr86D/92dKTIt2STk6aCN7gNyQ1vW198PtaAWH1/cO2UHgHOy3ZUt5X/Uwxl9
+cex4flln+1Viumts2GgsCQKBgCJH7psgSyPekK5auFdKEr5+Gc/jB8I/Z3K9+g4X
+5nH3G1PBTCJYLw7hRzw8W/8oALzvddqKzEFHphiGXK94Lqjt/A4q1OdbCrhiE68D
+My21P/dAKB1UYRSs9Y8CNyHCjuZM9jSMJ8vv6vG/SOJPsnVDWVAckAbQDvlTHC9t
+O98zAoGAcbW6uFDkrv0XMCpB9Su3KaNXOR0wzag+WIFQRXCcoTvxVi9iYfUReQPi
+oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F
++B6f4RoPdSXj24JHPg/ioRxjaj094UXJxua2yfkcecGNEuBQHSs=
+-----END RSA PRIVATE KEY-----
+`
+ // Public half of `privateKey`, identical to how it would be fed in from a file
+ publicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDArgK0ilRRfk8E7HIsjz5l3BuxmwpDd8DHRCVfOhbZ4gOSVxjEOOqBwWGjygdboBIZwFXmwDlU6sWX0hBJAgpQz0Cjvbjxtq/NjkvATrYPgnrXUhTaEn2eQO0PsqRNSFH46SK/oJfTp0q8/WgojxWJ2L7FUV8PO8uIk49DzqAqPV7WXU63vFsjx+3WQOX/ILeQvHCvaqs3dWjjzEoDudRWCOdUqcHEOshV9azIzPrXlQVzRV3QAKl6u7pC+/Secorpwt6IHpMKoVPGiR0tMMuNOVH8zrAKzIxPGfy2WmNDpJopbXMTvSOGAqNcp49O4SKOQl9Fzfq2HEevJamKLrMB dummy@example.com
+`
+ publicKey2 = `AAAAB3NzaC1yc2EAAAADAQABAAABAQDArgK0ilRRfk8E7HIsjz5l3BuxmwpDd8DHRCVfOhbZ4gOSVxjEOOqBwWGjygdboBIZwFXmwDlU6sWX0hBJAgpQz0Cjvbjxtq/NjkvATrYPgnrXUhTaEn2eQO0PsqRNSFH46SK/oJfTp0q8/WgojxWJ2L7FUV8PO8uIk49DzqAqPV7WXU63vFsjx+3WQOX/ILeQvHCvaqs3dWjjzEoDudRWCOdUqcHEOshV9azIzPrXlQVzRV3QAKl6u7pC+/Secorpwt6IHpMKoVPGiR0tMMuNOVH8zrAKzIxPGfy2WmNDpJopbXMTvSOGAqNcp49O4SKOQl9Fzfq2HEevJamKLrMB
+`
+ privateKey = `-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAwK4CtIpUUX5PBOxyLI8+ZdwbsZsKQ3fAx0QlXzoW2eIDklcY
+xDjqgcFho8oHW6ASGcBV5sA5VOrFl9IQSQIKUM9Ao7248bavzY5LwE62D4J611IU
+2hJ9nkDtD7KkTUhR+Okiv6CX06dKvP1oKI8Vidi+xVFfDzvLiJOPQ86gKj1e1l1O
+t7xbI8ft1kDl/yC3kLxwr2qrN3Vo48xKA7nUVgjnVKnBxDrIVfWsyMz615UFc0Vd
+0ACperu6Qvv0nnKK6cLeiB6TCqFTxokdLTDLjTlR/M6wCsyMTxn8tlpjQ6SaKW1z
+E70jhgKjXKePTuEijkJfRc36thxHryWpii6zAQIDAQABAoIBAA/DrPD8iF2KigiL
+F+RRa/eFhLaJStOuTpV/G9eotwnolgY5Hguf5H/tRIHUG7oBZLm6pMyWWZp7AuOj
+CjYO9q0Z5939vc349nVI+SWoyviF4msPiik1bhWulja8lPjFu/8zg+ZNy15Dx7ei
+vAzleAupMiKOv8pNSB/KguQ3WZ9a9bcQcoFQ2Foru6mXpLJ03kghVRlkqvQ7t5cA
+n11d2Hiipq9mleESr0c+MUPKLBX/neaWfGA4xgJTjIYjZi6avmYc/Ox3sQ9aLq2J
+tH0D4HVUZvaU28hn+jhbs64rRFbu++qQMe3vNvi/Q/iqcYU4b6tgDNzm/JFRTS/W
+njiz4mkCgYEA44CnQVmonN6qQ0AgNNlBY5+RX3wwBJZ1AaxpzwDRylAt2vlVUA0n
+YY4RW4J4+RMRKwHwjxK5RRmHjsIJx+nrpqihW3fte3ev5F2A9Wha4dzzEHxBY6IL
+362T/x2f+vYk6tV+uTZSUPHsuELH26mitbBVFNB/00nbMNdEc2bO5FMCgYEA2NCw
+ubt+g2bRkkT/Qf8gIM8ZDpZbARt6onqxVcWkQFT16ZjbsBWUrH1Xi7alv9+lwYLJ
+ckY/XDX4KeU19HabeAbpyy6G9Q2uBSWZlJbjl7QNhdLeuzV82U1/r8fy6Uu3gQnU
+WSFx2GesRpSmZpqNKMs5ksqteZ9Yjg1EIgXdINsCgYBIn9REt1NtKGOf7kOZu1T1
+cYXdvm4xuLoHW7u3OiK+e9P3mCqU0G4m5UxDMyZdFKohWZAqjCaamWi9uNGYgOMa
+I7DG20TzaiS7OOIm9TY17eul8pSJMrypnealxRZB7fug/6Bhjaa/cktIEwFr7P4l
+E/JFH73+fBA9yipu0H3xQwKBgHmiwrLAZF6VrVcxDD9bQQwHA5iyc4Wwg+Fpkdl7
+0wUgZQHTdtRXlxwaCaZhJqX5c4WXuSo6DMvPn1TpuZZXgCsbPch2ZtJOBWXvzTSW
+XkK6iaedQMWoYU2L8+mK9FU73EwxVodWgwcUSosiVCRV6oGLWdZnjGEiK00uVh38
+Si1nAoGBAL47wWinv1cDTnh5mm0mybz3oI2a6V9aIYCloQ/EFcvtahyR/gyB8qNF
+lObH9Faf0WGdnACZvTz22U9gWhw79S0SpDV31tC5Kl8dXHFiZ09vYUKkYmSd/kms
+SeKWrUkryx46LVf6NMhkyYmRqCEjBwfOozzezi5WbiJy6nn54GQt
+-----END RSA PRIVATE KEY-----
+`
+)
+
+func TestBackend_allowed_users(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = b.Initialize()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ roleData := map[string]interface{}{
+ "key_type": "otp",
+ "default_user": "ubuntu",
+ "cidr_list": "52.207.235.245/16",
+ "allowed_users": "test",
+ }
+
+ roleReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/role1",
+ Storage: config.StorageView,
+ Data: roleData,
+ }
+
+ resp, err := b.HandleRequest(roleReq)
+ if err != nil || resp != nil {
+ t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
+ }
+
+ credsData := map[string]interface{}{
+ "ip": "52.207.235.245",
+ "username": "ubuntu",
+ }
+ credsReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: config.StorageView,
+ Path: "creds/role1",
+ Data: credsData,
+ }
+
+ resp, err = b.HandleRequest(credsReq)
+ if err != nil || (resp != nil && resp.IsError()) || resp == nil {
+ t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
+ }
+ if resp.Data["key"] == "" ||
+ resp.Data["key_type"] != "otp" ||
+ resp.Data["ip"] != "52.207.235.245" ||
+ resp.Data["username"] != "ubuntu" {
+ t.Fatalf("failed to create credential: resp:%#v", resp)
+ }
+
+ credsData["username"] = "test"
+ resp, err = b.HandleRequest(credsReq)
+ if err != nil || (resp != nil && resp.IsError()) || resp == nil {
+ t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
+ }
+ if resp.Data["key"] == "" ||
+ resp.Data["key_type"] != "otp" ||
+ resp.Data["ip"] != "52.207.235.245" ||
+ resp.Data["username"] != "test" {
+ t.Fatalf("failed to create credential: resp:%#v", resp)
+ }
+
+ credsData["username"] = "random"
+ resp, err = b.HandleRequest(credsReq)
+ if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
+ t.Fatalf("expected failure: resp:%#v err:%s", resp, err)
+ }
+
+ delete(roleData, "allowed_users")
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || resp != nil {
+ t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
+ }
+
+ credsData["username"] = "ubuntu"
+ resp, err = b.HandleRequest(credsReq)
+ if err != nil || (resp != nil && resp.IsError()) || resp == nil {
+ t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
+ }
+ if resp.Data["key"] == "" ||
+ resp.Data["key_type"] != "otp" ||
+ resp.Data["ip"] != "52.207.235.245" ||
+ resp.Data["username"] != "ubuntu" {
+ t.Fatalf("failed to create credential: resp:%#v", resp)
+ }
+
+ credsData["username"] = "test"
+ resp, err = b.HandleRequest(credsReq)
+ if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
+ t.Fatalf("expected failure: resp:%#v err:%s", resp, err)
+ }
+
+ roleData["allowed_users"] = "*"
+ resp, err = b.HandleRequest(roleReq)
+ if err != nil || resp != nil {
+ t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
+ }
+
+ resp, err = b.HandleRequest(credsReq)
+ if err != nil || (resp != nil && resp.IsError()) || resp == nil {
+ t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
+ }
+ if resp.Data["key"] == "" ||
+ resp.Data["key_type"] != "otp" ||
+ resp.Data["ip"] != "52.207.235.245" ||
+ resp.Data["username"] != "test" {
+ t.Fatalf("failed to create credential: resp:%#v", resp)
+ }
+}
+
+func testingFactory(conf *logical.BackendConfig) (logical.Backend, error) {
+ _, err := vault.StartSSHHostTestServer()
+ if err != nil {
+ panic(fmt.Sprintf("error starting mock server:%s", err))
+ }
+ defaultLeaseTTLVal := 2 * time.Minute
+ maxLeaseTTLVal := 10 * time.Minute
+ return Factory(&logical.BackendConfig{
+ Logger: nil,
+ StorageView: &logical.InmemStorage{},
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
+ })
+}
+
+func TestSSHBackend_Lookup(t *testing.T) {
+ testOTPRoleData := map[string]interface{}{
+ "key_type": testOTPKeyType,
+ "default_user": testUserName,
+ "cidr_list": testCIDRList,
+ }
+ testDynamicRoleData := map[string]interface{}{
+ "key_type": testDynamicKeyType,
+ "key": testKeyName,
+ "admin_user": testAdminUser,
+ "default_user": testAdminUser,
+ "cidr_list": testCIDRList,
+ }
+ data := map[string]interface{}{
+ "ip": testIP,
+ }
+ resp1 := []string(nil)
+ resp2 := []string{testOTPRoleName}
+ resp3 := []string{testDynamicRoleName, testOTPRoleName}
+ resp4 := []string{testDynamicRoleName}
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Factory: testingFactory,
+ Steps: []logicaltest.TestStep{
+ testLookupRead(t, data, resp1),
+ testRoleWrite(t, testOTPRoleName, testOTPRoleData),
+ testLookupRead(t, data, resp2),
+ testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
+ testRoleWrite(t, testDynamicRoleName, testDynamicRoleData),
+ testLookupRead(t, data, resp3),
+ testRoleDelete(t, testOTPRoleName),
+ testLookupRead(t, data, resp4),
+ testRoleDelete(t, testDynamicRoleName),
+ testLookupRead(t, data, resp1),
+ },
+ })
+}
+
+func TestSSHBackend_DynamicKeyCreate(t *testing.T) {
+ testDynamicRoleData := map[string]interface{}{
+ "key_type": testDynamicKeyType,
+ "key": testKeyName,
+ "admin_user": testAdminUser,
+ "default_user": testAdminUser,
+ "cidr_list": testCIDRList,
+ }
+ data := map[string]interface{}{
+ "username": testUserName,
+ "ip": testIP,
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Factory: testingFactory,
+ Steps: []logicaltest.TestStep{
+ testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
+ testRoleWrite(t, testDynamicRoleName, testDynamicRoleData),
+ testCredsWrite(t, testDynamicRoleName, data, false),
+ },
+ })
+}
+
+func TestSSHBackend_OTPRoleCrud(t *testing.T) {
+ testOTPRoleData := map[string]interface{}{
+ "key_type": testOTPKeyType,
+ "default_user": testUserName,
+ "cidr_list": testCIDRList,
+ }
+ respOTPRoleData := map[string]interface{}{
+ "key_type": testOTPKeyType,
+ "port": 22,
+ "default_user": testUserName,
+ "cidr_list": testCIDRList,
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Factory: testingFactory,
+ Steps: []logicaltest.TestStep{
+ testRoleWrite(t, testOTPRoleName, testOTPRoleData),
+ testRoleRead(t, testOTPRoleName, respOTPRoleData),
+ testRoleDelete(t, testOTPRoleName),
+ testRoleRead(t, testOTPRoleName, nil),
+ },
+ })
+}
+
+func TestSSHBackend_DynamicRoleCrud(t *testing.T) {
+ testDynamicRoleData := map[string]interface{}{
+ "key_type": testDynamicKeyType,
+ "key": testKeyName,
+ "admin_user": testAdminUser,
+ "default_user": testAdminUser,
+ "cidr_list": testCIDRList,
+ }
+ respDynamicRoleData := map[string]interface{}{
+ "cidr_list": testCIDRList,
+ "port": 22,
+ "install_script": DefaultPublicKeyInstallScript,
+ "key_bits": 1024,
+ "key": testKeyName,
+ "admin_user": testUserName,
+ "default_user": testUserName,
+ "key_type": testDynamicKeyType,
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Factory: testingFactory,
+ Steps: []logicaltest.TestStep{
+ testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
+ testRoleWrite(t, testDynamicRoleName, testDynamicRoleData),
+ testRoleRead(t, testDynamicRoleName, respDynamicRoleData),
+ testRoleDelete(t, testDynamicRoleName),
+ testRoleRead(t, testDynamicRoleName, nil),
+ },
+ })
+}
+
+func TestSSHBackend_NamedKeysCrud(t *testing.T) {
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Factory: testingFactory,
+ Steps: []logicaltest.TestStep{
+ testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
+ testNamedKeysDelete(t),
+ },
+ })
+}
+
+func TestSSHBackend_OTPCreate(t *testing.T) {
+ testOTPRoleData := map[string]interface{}{
+ "key_type": testOTPKeyType,
+ "default_user": testUserName,
+ "cidr_list": testCIDRList,
+ }
+ data := map[string]interface{}{
+ "username": testUserName,
+ "ip": testIP,
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Factory: testingFactory,
+ Steps: []logicaltest.TestStep{
+ testRoleWrite(t, testOTPRoleName, testOTPRoleData),
+ testCredsWrite(t, testOTPRoleName, data, false),
+ },
+ })
+}
+
+func TestSSHBackend_VerifyEcho(t *testing.T) {
+ verifyData := map[string]interface{}{
+ "otp": api.VerifyEchoRequest,
+ }
+ expectedData := map[string]interface{}{
+ "message": api.VerifyEchoResponse,
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Factory: testingFactory,
+ Steps: []logicaltest.TestStep{
+ testVerifyWrite(t, verifyData, expectedData),
+ },
+ })
+}
+
+func TestSSHBackend_ConfigZeroAddressCRUD(t *testing.T) {
+ testOTPRoleData := map[string]interface{}{
+ "key_type": testOTPKeyType,
+ "default_user": testUserName,
+ "cidr_list": testCIDRList,
+ }
+ testDynamicRoleData := map[string]interface{}{
+ "key_type": testDynamicKeyType,
+ "key": testKeyName,
+ "admin_user": testAdminUser,
+ "default_user": testAdminUser,
+ "cidr_list": testCIDRList,
+ }
+ req1 := map[string]interface{}{
+ "roles": testOTPRoleName,
+ }
+ resp1 := map[string]interface{}{
+ "roles": []string{testOTPRoleName},
+ }
+ req2 := map[string]interface{}{
+ "roles": fmt.Sprintf("%s,%s", testOTPRoleName, testDynamicRoleName),
+ }
+ resp2 := map[string]interface{}{
+ "roles": []string{testOTPRoleName, testDynamicRoleName},
+ }
+ resp3 := map[string]interface{}{
+ "roles": []string{},
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Factory: testingFactory,
+ Steps: []logicaltest.TestStep{
+ testRoleWrite(t, testOTPRoleName, testOTPRoleData),
+ testConfigZeroAddressWrite(t, req1),
+ testConfigZeroAddressRead(t, resp1),
+ testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
+ testRoleWrite(t, testDynamicRoleName, testDynamicRoleData),
+ testConfigZeroAddressWrite(t, req2),
+ testConfigZeroAddressRead(t, resp2),
+ testRoleDelete(t, testDynamicRoleName),
+ testConfigZeroAddressRead(t, resp1),
+ testRoleDelete(t, testOTPRoleName),
+ testConfigZeroAddressRead(t, resp3),
+ testConfigZeroAddressDelete(t),
+ },
+ })
+}
+
+func TestSSHBackend_CredsForZeroAddressRoles(t *testing.T) {
+ dynamicRoleData := map[string]interface{}{
+ "key_type": testDynamicKeyType,
+ "key": testKeyName,
+ "admin_user": testAdminUser,
+ "default_user": testAdminUser,
+ }
+ otpRoleData := map[string]interface{}{
+ "key_type": testOTPKeyType,
+ "default_user": testUserName,
+ }
+ data := map[string]interface{}{
+ "username": testUserName,
+ "ip": testIP,
+ }
+ req1 := map[string]interface{}{
+ "roles": testOTPRoleName,
+ }
+ req2 := map[string]interface{}{
+ "roles": fmt.Sprintf("%s,%s", testOTPRoleName, testDynamicRoleName),
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Factory: testingFactory,
+ Steps: []logicaltest.TestStep{
+ testRoleWrite(t, testOTPRoleName, otpRoleData),
+ testCredsWrite(t, testOTPRoleName, data, true),
+ testConfigZeroAddressWrite(t, req1),
+ testCredsWrite(t, testOTPRoleName, data, false),
+ testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
+ testRoleWrite(t, testDynamicRoleName, dynamicRoleData),
+ testCredsWrite(t, testDynamicRoleName, data, true),
+ testConfigZeroAddressWrite(t, req2),
+ testCredsWrite(t, testDynamicRoleName, data, false),
+ testConfigZeroAddressDelete(t),
+ testCredsWrite(t, testOTPRoleName, data, true),
+ testCredsWrite(t, testDynamicRoleName, data, true),
+ },
+ })
+}
+
+func TestBackend_AbleToRetrievePublicKey(t *testing.T) {
+
+ config := logical.TestBackendConfig()
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatalf("Cannot create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ configCaStep(),
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "public_key",
+ Unauthenticated: true,
+
+ Check: func(resp *logical.Response) error {
+
+ key := string(resp.Data["http_raw_body"].([]byte))
+
+ if key != publicKey {
+ return fmt.Errorf("public_key incorrect. Expected %v, actual %v", publicKey, key)
+ }
+
+ return nil
+ },
+ },
+ },
+ }
+
+ logicaltest.Test(t, testCase)
+}
+
+func TestBackend_AbleToAutoGenerateSigningKeys(t *testing.T) {
+
+ config := logical.TestBackendConfig()
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatalf("Cannot create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ },
+
+ logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "public_key",
+ Unauthenticated: true,
+
+ Check: func(resp *logical.Response) error {
+
+ key := string(resp.Data["http_raw_body"].([]byte))
+
+ if key == "" {
+ return fmt.Errorf("public_key empty. Expected not empty, actual %s", key)
+ }
+
+ return nil
+ },
+ },
+ },
+ }
+
+ logicaltest.Test(t, testCase)
+}
+
+func TestBackend_ValidPrincipalsValidatedForHostCertificates(t *testing.T) {
+ config := logical.TestBackendConfig()
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatalf("Cannot create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ configCaStep(),
+
+ createRoleStep("testing", map[string]interface{}{
+ "key_type": "ca",
+ "allow_host_certificates": true,
+ "allowed_domains": "example.com,example.org",
+ "allow_subdomains": true,
+ "default_critical_options": map[string]interface{}{
+ "option": "value",
+ },
+ "default_extensions": map[string]interface{}{
+ "extension": "extended",
+ },
+ }),
+
+ signCertificateStep("testing", "vault-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.HostCert, []string{"dummy.example.org", "second.example.com"}, map[string]string{
+ "option": "value",
+ }, map[string]string{
+ "extension": "extended",
+ },
+ 2*time.Hour, map[string]interface{}{
+ "public_key": publicKey2,
+ "ttl": "2h",
+ "cert_type": "host",
+ "valid_principals": "dummy.example.org,second.example.com",
+ }),
+ },
+ }
+
+ logicaltest.Test(t, testCase)
+}
+
+func TestBackend_OptionsOverrideDefaults(t *testing.T) {
+ config := logical.TestBackendConfig()
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatalf("Cannot create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ configCaStep(),
+
+ createRoleStep("testing", map[string]interface{}{
+ "key_type": "ca",
+ "allowed_users": "tuber",
+ "default_user": "tuber",
+ "allow_user_certificates": true,
+ "allowed_critical_options": "option,secondary",
+ "allowed_extensions": "extension,additional",
+ "default_critical_options": map[string]interface{}{
+ "option": "value",
+ },
+ "default_extensions": map[string]interface{}{
+ "extension": "extended",
+ },
+ }),
+
+ signCertificateStep("testing", "vault-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.UserCert, []string{"tuber"}, map[string]string{
+ "secondary": "value",
+ }, map[string]string{
+ "additional": "value",
+ }, 2*time.Hour, map[string]interface{}{
+ "public_key": publicKey2,
+ "ttl": "2h",
+ "critical_options": map[string]interface{}{
+ "secondary": "value",
+ },
+ "extensions": map[string]interface{}{
+ "additional": "value",
+ },
+ }),
+ },
+ }
+
+ logicaltest.Test(t, testCase)
+}
+
+func configCaStep() logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/ca",
+ Data: map[string]interface{}{
+ "public_key": publicKey,
+ "private_key": privateKey,
+ },
+ }
+}
+
+func createRoleStep(name string, parameters map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.CreateOperation,
+ Path: "roles/" + name,
+ Data: parameters,
+ }
+}
+
+func signCertificateStep(
+ role, keyId string, certType int, validPrincipals []string,
+ criticalOptionPermissions, extensionPermissions map[string]string,
+ ttl time.Duration,
+ requestParameters map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "sign/" + role,
+ Data: requestParameters,
+
+ Check: func(resp *logical.Response) error {
+
+ serialNumber := resp.Data["serial_number"].(string)
+ if serialNumber == "" {
+ return errors.New("No serial number in response")
+ }
+
+ signedKey := strings.TrimSpace(resp.Data["signed_key"].(string))
+ if signedKey == "" {
+ return errors.New("No signed key in response")
+ }
+
+ key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1])
+
+ parsedKey, err := ssh.ParsePublicKey(key)
+ if err != nil {
+ return err
+ }
+
+ return validateSSHCertificate(parsedKey.(*ssh.Certificate), keyId, certType, validPrincipals, criticalOptionPermissions, extensionPermissions, ttl)
+ },
+ }
+}
+
+func validateSSHCertificate(cert *ssh.Certificate, keyId string, certType int, validPrincipals []string, criticalOptionPermissions, extensionPermissions map[string]string,
+ ttl time.Duration) error {
+
+ if cert.KeyId != keyId {
+ return fmt.Errorf("Incorrect KeyId: %v, wanted %v", cert.KeyId, keyId)
+ }
+
+ if cert.CertType != uint32(certType) {
+ return fmt.Errorf("Incorrect CertType: %v", cert.CertType)
+ }
+
+ if time.Unix(int64(cert.ValidAfter), 0).After(time.Now()) {
+ return fmt.Errorf("Incorrect ValidAfter: %v", cert.ValidAfter)
+ }
+
+ if time.Unix(int64(cert.ValidBefore), 0).Before(time.Now()) {
+ return fmt.Errorf("Incorrect ValidBefore: %v", cert.ValidBefore)
+ }
+
+ actualTtl := time.Unix(int64(cert.ValidBefore), 0).Add(-30 * time.Second).Sub(time.Unix(int64(cert.ValidAfter), 0))
+ if actualTtl != ttl {
+ return fmt.Errorf("Incorrect ttl: expected: %v, actualL %v", ttl, actualTtl)
+ }
+
+ if !reflect.DeepEqual(cert.ValidPrincipals, validPrincipals) {
+ return fmt.Errorf("Incorrect ValidPrincipals: expected: %#v actual: %#v", validPrincipals, cert.ValidPrincipals)
+ }
+
+ publicSigningKey, err := getSigningPublicKey()
+ if err != nil {
+ return err
+ }
+ if !reflect.DeepEqual(cert.SignatureKey, publicSigningKey) {
+ return fmt.Errorf("Incorrect SignatureKey: %v", cert.SignatureKey)
+ }
+
+ if cert.Signature == nil {
+ return fmt.Errorf("Incorrect Signature: %v", cert.Signature)
+ }
+
+ if !reflect.DeepEqual(cert.Permissions.Extensions, extensionPermissions) {
+ return fmt.Errorf("Incorrect Permissions.Extensions: Expected: %v, Actual: %v", extensionPermissions, cert.Permissions.Extensions)
+ }
+
+ if !reflect.DeepEqual(cert.Permissions.CriticalOptions, criticalOptionPermissions) {
+ return fmt.Errorf("Incorrect Permissions.CriticalOptions: %v", cert.Permissions.CriticalOptions)
+ }
+
+ return nil
+}
+
+func getSigningPublicKey() (ssh.PublicKey, error) {
+ key, err := base64.StdEncoding.DecodeString(strings.Split(publicKey, " ")[1])
+ if err != nil {
+ return nil, err
+ }
+
+ parsedKey, err := ssh.ParsePublicKey(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return parsedKey, nil
+}
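+
+// Note: getSigningPublicKey splits the authorized_keys format by hand before
+// calling ssh.ParsePublicKey. A sketch of an equivalent decode using the
+// x/crypto/ssh helper that understands that format directly:
+//
+//	parsedKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(publicKey))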
+
+func testConfigZeroAddressDelete(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "config/zeroaddress",
+ }
+}
+
+func testConfigZeroAddressWrite(t *testing.T, data map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "config/zeroaddress",
+ Data: data,
+ }
+}
+
+func testConfigZeroAddressRead(t *testing.T, expected map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "config/zeroaddress",
+ Check: func(resp *logical.Response) error {
+ var d zeroAddressRoles
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ var ex zeroAddressRoles
+ if err := mapstructure.Decode(expected, &ex); err != nil {
+ return err
+ }
+
+ if !reflect.DeepEqual(d, ex) {
+ return fmt.Errorf("Response mismatch:\nActual:%#v\nExpected:%#v", d, ex)
+ }
+
+ return nil
+ },
+ }
+}
+
+func testVerifyWrite(t *testing.T, data map[string]interface{}, expected map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: fmt.Sprintf("verify"),
+ Data: data,
+ Check: func(resp *logical.Response) error {
+ var ac api.SSHVerifyResponse
+ if err := mapstructure.Decode(resp.Data, &ac); err != nil {
+ return err
+ }
+ var ex api.SSHVerifyResponse
+ if err := mapstructure.Decode(expected, &ex); err != nil {
+ return err
+ }
+
+ if !reflect.DeepEqual(ac, ex) {
+ return fmt.Errorf("Invalid response")
+ }
+ return nil
+ },
+ }
+}
+
+func testNamedKeysWrite(t *testing.T, name, key string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: fmt.Sprintf("keys/%s", name),
+ Data: map[string]interface{}{
+ "key": key,
+ },
+ }
+}
+
+func testNamedKeysDelete(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: fmt.Sprintf("keys/%s", testKeyName),
+ }
+}
+
+func testLookupRead(t *testing.T, data map[string]interface{}, expected []string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "lookup",
+ Data: data,
+ Check: func(resp *logical.Response) error {
+ if resp.Data == nil || resp.Data["roles"] == nil {
+ return fmt.Errorf("Missing roles information")
+ }
+ if !reflect.DeepEqual(resp.Data["roles"].([]string), expected) {
+ return fmt.Errorf("Invalid response: \nactual:%#v\nexpected:%#v", resp.Data["roles"].([]string), expected)
+ }
+ return nil
+ },
+ }
+}
+
+func testRoleWrite(t *testing.T, name string, data map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "roles/" + name,
+ Data: data,
+ }
+}
+
+func testRoleRead(t *testing.T, roleName string, expected map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "roles/" + roleName,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if expected == nil {
+ return nil
+ }
+ return fmt.Errorf("bad: %#v", resp)
+ }
+ var d sshRole
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return fmt.Errorf("error decoding response:%s", err)
+ }
+ if roleName == testOTPRoleName {
+ if d.KeyType != expected["key_type"] || d.DefaultUser != expected["default_user"] || d.CIDRList != expected["cidr_list"] {
+ return fmt.Errorf("data mismatch. bad: %#v", resp)
+ }
+ } else {
+ if d.AdminUser != expected["admin_user"] || d.CIDRList != expected["cidr_list"] || d.KeyName != expected["key"] || d.KeyType != expected["key_type"] {
+ return fmt.Errorf("data mismatch. bad: %#v", resp)
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func testRoleDelete(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "roles/" + name,
+ }
+}
+
+func testCredsWrite(t *testing.T, roleName string, data map[string]interface{}, expectError bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: fmt.Sprintf("creds/%s", roleName),
+ Data: data,
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ return fmt.Errorf("response is nil")
+ }
+ if resp.Data == nil {
+ return fmt.Errorf("data is nil")
+ }
+ if expectError {
+ var e struct {
+ Error string `mapstructure:"error"`
+ }
+ if err := mapstructure.Decode(resp.Data, &e); err != nil {
+ return err
+ }
+ if len(e.Error) == 0 {
+ return fmt.Errorf("expected error, but write succeeded.")
+ }
+ return nil
+ }
+ if roleName == testDynamicRoleName {
+ var d struct {
+ Key string `mapstructure:"key"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ if d.Key == "" {
+ return fmt.Errorf("Generated key is an empty string")
+ }
+ // Checking only for a parsable key
+ _, err := ssh.ParsePrivateKey([]byte(d.Key))
+ if err != nil {
+ return fmt.Errorf("Generated key is invalid")
+ }
+ } else {
+ if resp.Data["key_type"] != KeyTypeOTP {
+ return fmt.Errorf("Incorrect key_type")
+ }
+ if resp.Data["key"] == nil {
+ return fmt.Errorf("Invalid key")
+ }
+ }
+ return nil
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/communicator.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/communicator.go
new file mode 100644
index 0000000..3ab86fa
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/communicator.go
@@ -0,0 +1,350 @@
+package ssh
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "path/filepath"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/agent"
+)
+
+type comm struct {
+ client *ssh.Client
+ config *SSHCommConfig
+ conn net.Conn
+ address string
+}
+
+// SSHCommConfig is the structure used to configure the SSH communicator.
+type SSHCommConfig struct {
+ // The configuration of the Go SSH connection
+ SSHConfig *ssh.ClientConfig
+
+ // Connection returns a new connection. The current connection
+ // in use will be closed as part of the Close method, or in the
+ // case an error occurs.
+ Connection func() (net.Conn, error)
+
+ // Pty, if true, will request a pty from the remote end.
+ Pty bool
+
+ // DisableAgent, if true, will not forward the SSH agent.
+ DisableAgent bool
+
+ // Logger for output
+ Logger log.Logger
+}
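+
+// An illustrative sketch (not part of the original file) of building a
+// communicator configuration; the address, user, and password are
+// placeholders:
+//
+//	config := &SSHCommConfig{
+//		SSHConfig: &ssh.ClientConfig{
+//			User: "ubuntu",
+//			Auth: []ssh.AuthMethod{ssh.Password("secret")},
+//		},
+//		Connection: func() (net.Conn, error) {
+//			return net.Dial("tcp", "192.0.2.10:22")
+//		},
+//		Logger: log.New("ssh"),
+//	}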
+
+// Creates a new communicator implementation over SSH. This takes
+// an already existing TCP connection and SSH configuration.
+func SSHCommNew(address string, config *SSHCommConfig) (result *comm, err error) {
+ // Establish an initial connection and connect
+ result = &comm{
+ config: config,
+ address: address,
+ }
+
+ if err = result.reconnect(); err != nil {
+ result = nil
+ return
+ }
+
+ return
+}
+
+func (c *comm) Close() error {
+ var err error
+ if c.conn != nil {
+ err = c.conn.Close()
+ }
+ c.conn = nil
+ c.client = nil
+ return err
+}
+
+func (c *comm) Upload(path string, input io.Reader, fi *os.FileInfo) error {
+ // The target directory and file for talking the SCP protocol
+ target_dir := filepath.Dir(path)
+ target_file := filepath.Base(path)
+
+ // On Windows, filepath.Dir uses backslash separators (i.e. "\tmp").
+ // This does not work when the target host is Unix. Switch to forward
+ // slashes, which work on both Unix and Windows.
+ target_dir = filepath.ToSlash(target_dir)
+
+ scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error {
+ return scpUploadFile(target_file, input, w, stdoutR, fi)
+ }
+
+ return c.scpSession("scp -vt "+target_dir, scpFunc)
+}
+
+func (c *comm) NewSession() (session *ssh.Session, err error) {
+ if c.client == nil {
+ err = errors.New("client not available")
+ } else {
+ session, err = c.client.NewSession()
+ }
+
+ if err != nil {
+ c.config.Logger.Error("ssh session open error, attempting reconnect", "error", err)
+ if err := c.reconnect(); err != nil {
+ c.config.Logger.Error("reconnect attempt failed", "error", err)
+ return nil, err
+ }
+
+ return c.client.NewSession()
+ }
+
+ return session, nil
+}
+
+func (c *comm) reconnect() error {
+ // Close previous connection.
+ if c.conn != nil {
+ c.Close()
+ }
+
+ var err error
+ c.conn, err = c.config.Connection()
+ if err != nil {
+ // Explicitly set this to the REAL nil. Connection() can return
+ // a nil implementation of net.Conn which will make the
+ // "if c.conn == nil" check fail above. Read here for more information
+ // on this psychotic language feature:
+ //
+ // http://golang.org/doc/faq#nil_error
+ c.conn = nil
+ c.config.Logger.Error("reconnection error", "error", err)
+ return err
+ }
+
+ sshConn, sshChan, req, err := ssh.NewClientConn(c.conn, c.address, c.config.SSHConfig)
+ if err != nil {
+ c.config.Logger.Error("handshake error", "error", err)
+ c.Close()
+ return err
+ }
+ if sshConn != nil {
+ c.client = ssh.NewClient(sshConn, sshChan, req)
+ }
+ c.connectToAgent()
+
+ return nil
+}
+
+func (c *comm) connectToAgent() {
+ if c.client == nil {
+ return
+ }
+
+ if c.config.DisableAgent {
+ return
+ }
+
+ // open connection to the local agent
+ socketLocation := os.Getenv("SSH_AUTH_SOCK")
+ if socketLocation == "" {
+ return
+ }
+ agentConn, err := net.Dial("unix", socketLocation)
+ if err != nil {
+ c.config.Logger.Error("could not connect to local agent socket", "socket_path", socketLocation)
+ return
+ }
+ defer agentConn.Close()
+
+ // create agent and add in auth
+ forwardingAgent := agent.NewClient(agentConn)
+ if forwardingAgent == nil {
+ c.config.Logger.Error("could not create agent client")
+ return
+ }
+
+ // add callback for forwarding agent to SSH config
+ // XXX - might want to handle reconnects appending multiple callbacks
+ auth := ssh.PublicKeysCallback(forwardingAgent.Signers)
+ c.config.SSHConfig.Auth = append(c.config.SSHConfig.Auth, auth)
+ agent.ForwardToAgent(c.client, forwardingAgent)
+
+ // Setup a session to request agent forwarding
+ session, err := c.NewSession()
+ if err != nil {
+ return
+ }
+ defer session.Close()
+
+ err = agent.RequestAgentForwarding(session)
+ if err != nil {
+ c.config.Logger.Error("error requesting agent forwarding", "error", err)
+ return
+ }
+ return
+}
+
+func (c *comm) scpSession(scpCommand string, f func(io.Writer, *bufio.Reader) error) error {
+ session, err := c.NewSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+
+ // Get a pipe to stdin so that we can send data down
+ stdinW, err := session.StdinPipe()
+ if err != nil {
+ return err
+ }
+
+ // We only want to close once, so we nil w after we close it,
+ // and only close in the defer if it hasn't been closed already.
+ defer func() {
+ if stdinW != nil {
+ stdinW.Close()
+ }
+ }()
+
+ // Get a pipe to stdout so that we can get responses back
+ stdoutPipe, err := session.StdoutPipe()
+ if err != nil {
+ return err
+ }
+ stdoutR := bufio.NewReader(stdoutPipe)
+
+ // Set stderr to a bytes buffer
+ stderr := new(bytes.Buffer)
+ session.Stderr = stderr
+
+ // Start the sink mode on the other side
+ if err := session.Start(scpCommand); err != nil {
+ return err
+ }
+
+ // Call our callback that executes in the context of SCP. We ignore
+ // EOF errors if they occur because it usually means that SCP prematurely
+ // ended on the other side.
+ if err := f(stdinW, stdoutR); err != nil && err != io.EOF {
+ return err
+ }
+
+ // Close the stdin, which sends an EOF, and then set w to nil so that
+ // our defer func doesn't close it again since that is unsafe with
+ // the Go SSH package.
+ stdinW.Close()
+ stdinW = nil
+
+ // Wait for the SCP connection to close, meaning it has consumed all
+ // our data and has completed. Or has errored.
+ err = session.Wait()
+ if err != nil {
+ if exitErr, ok := err.(*ssh.ExitError); ok {
+ // We have an ExitError, meaning we can just read
+ // the exit status
+ c.config.Logger.Error("got non-zero exit status", "exit_status", exitErr.ExitStatus())
+
+ // If we exited with status 127, it means SCP isn't available.
+ // Return a more descriptive error for that.
+ if exitErr.ExitStatus() == 127 {
+ return errors.New(
+ "SCP failed to start. This usually means that SCP is not\n" +
+ "properly installed on the remote system.")
+ }
+ }
+
+ return err
+ }
+ return nil
+}
+
+// checkSCPStatus checks that a prior command sent to SCP completed
+// successfully. If it did not complete successfully, an error will
+// be returned.
+func checkSCPStatus(r *bufio.Reader) error {
+ code, err := r.ReadByte()
+ if err != nil {
+ return err
+ }
+
+ if code != 0 {
+ // Treat any non-zero (really 1 and 2) as fatal errors
+ message, _, err := r.ReadLine()
+ if err != nil {
+ return fmt.Errorf("Error reading error message: %s", err)
+ }
+
+ return errors.New(string(message))
+ }
+
+ return nil
+}
+
+func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, fi *os.FileInfo) error {
+ var mode os.FileMode
+ var size int64
+
+ if fi != nil && (*fi).Mode().IsRegular() {
+ mode = (*fi).Mode().Perm()
+ size = (*fi).Size()
+ } else {
+ // Create a temporary file where we can copy the contents of the src
+ // so that we can determine the length, since SCP is length-prefixed.
+ tf, err := ioutil.TempFile("", "vault-ssh-upload")
+ if err != nil {
+ return fmt.Errorf("Error creating temporary file for upload: %s", err)
+ }
+ defer os.Remove(tf.Name())
+ defer tf.Close()
+
+ mode = 0644
+
+ if _, err := io.Copy(tf, src); err != nil {
+ return err
+ }
+
+ // Sync the file so that the contents are definitely on disk, then
+ // read the length of it.
+ if err := tf.Sync(); err != nil {
+ return fmt.Errorf("Error creating temporary file for upload: %s", err)
+ }
+
+ // Seek the file to the beginning so we can re-read all of it
+ if _, err := tf.Seek(0, 0); err != nil {
+ return fmt.Errorf("Error creating temporary file for upload: %s", err)
+ }
+
+ tfi, err := tf.Stat()
+ if err != nil {
+ return fmt.Errorf("Error creating temporary file for upload: %s", err)
+ }
+
+ size = tfi.Size()
+ src = tf
+ }
+
+ // Start the protocol
+ perms := fmt.Sprintf("C%04o", mode)
+
+ fmt.Fprintln(w, perms, size, dst)
+ if err := checkSCPStatus(r); err != nil {
+ return err
+ }
+
+ if _, err := io.CopyN(w, src, size); err != nil {
+ return err
+ }
+
+ fmt.Fprint(w, "\x00")
+ if err := checkSCPStatus(r); err != nil {
+ return err
+ }
+
+ return nil
+}
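+
+// An illustrative sketch (not part of the original file) of the expected call
+// flow, reusing a config as sketched above; the address, path, and key
+// material are placeholders:
+//
+//	comm, err := SSHCommNew("192.0.2.10:22", config)
+//	if err == nil {
+//		err = comm.Upload("/tmp/key.pub", strings.NewReader("ssh-rsa AAAA..."), nil)
+//	}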
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/linux_install_script.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/linux_install_script.go
new file mode 100644
index 0000000..2c944c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/linux_install_script.go
@@ -0,0 +1,69 @@
+package ssh
+
+const (
+ // This is a constant representing a script to install and uninstall public
+ // key in remote hosts.
+ DefaultPublicKeyInstallScript = `
+#!/bin/bash
+#
+# This is a default script which installs or uninstalls an RSA public key to/from
+# authorized_keys file in a typical linux machine.
+#
+# If the platform differs or if the binaries used in this script are not available
+# in target machine, use the 'install_script' parameter with 'roles/' endpoint to
+# register a custom script (applicable for Dynamic type only).
+#
+# Vault server runs this script on the target machine with the following params:
+#
+# $1:INSTALL_OPTION: "install" or "uninstall"
+#
+# $2:PUBLIC_KEY_FILE: File name containing public key to be installed. Vault server
+# uses UUID as name to avoid collisions with public keys generated for other requests.
+#
+# $3:AUTH_KEYS_FILE: Absolute path of the authorized_keys file.
+# Currently, vault uses /home/<username>/.ssh/authorized_keys as the path.
+#
+# [Note: This script will be run by Vault using the registered admin username.
+# Notice that some commands below are run as 'sudo'. For graceful execution of
+# this script there should not be any password prompts. So, disable password
+# prompt for the admin username registered with Vault.]
+
+set -e
+
+# Storing arguments into variables, to increase readability of the script.
+INSTALL_OPTION=$1
+PUBLIC_KEY_FILE=$2
+AUTH_KEYS_FILE=$3
+
+# Delete the public key file and the temporary file
+function cleanup
+{
+ rm -f "$PUBLIC_KEY_FILE" temp_$PUBLIC_KEY_FILE
+}
+
+# 'cleanup' will be called if the script ends or if any command fails.
+trap cleanup EXIT
+
+# Return if the option is anything other than 'install' or 'uninstall'.
+if [ "$INSTALL_OPTION" != "install" ] && [ "$INSTALL_OPTION" != "uninstall" ]; then
+ exit 1
+fi
+
+# Create the .ssh directory and authorized_keys file if it does not exist
+SSH_DIR=$(dirname "$AUTH_KEYS_FILE")
+sudo mkdir -p "$SSH_DIR"
+sudo touch "$AUTH_KEYS_FILE"
+
+# Remove the key from authorized_keys file if it is already present.
+# This step is common for both install and uninstall. Note that grep's
+# return code is ignored, thus if grep fails all keys will be removed
+# rather than none and it fails secure
+sudo grep -vFf "$PUBLIC_KEY_FILE" "$AUTH_KEYS_FILE" > "temp_$PUBLIC_KEY_FILE" || true
+cat "temp_$PUBLIC_KEY_FILE" | sudo tee "$AUTH_KEYS_FILE"
+
+# Append the new public key to authorized_keys file
+if [ "$INSTALL_OPTION" == "install" ]; then
+ cat "$PUBLIC_KEY_FILE" | sudo tee --append "$AUTH_KEYS_FILE"
+fi
+`
+)
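
For context, Vault runs the registered script on the target host with the
three positional arguments documented in the header above. The sketch below
shows the rough shape of that invocation; the script and key file names are
made up for illustration, and this is not the backend's actual execution path.

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Roughly: bash <script> <install|uninstall> <public-key-file> <authorized-keys-path>
	cmd := exec.Command("bash", "vault_ssh_install.sh",
		"install",                              // $1: INSTALL_OPTION
		"3c9f0a52-5b2a-4d0e-9c1f-2a7b8d6e4f10", // $2: PUBLIC_KEY_FILE (UUID-named)
		"/home/web/.ssh/authorized_keys",       // $3: AUTH_KEYS_FILE
	)
	fmt.Println(cmd.Args) // inspect the argument vector without running it
}
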
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca.go
new file mode 100644
index 0000000..37300b7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca.go
@@ -0,0 +1,286 @@
+package ssh
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+
+ multierror "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "golang.org/x/crypto/ssh"
+)
+
+const (
+ caPublicKey = "ca_public_key"
+ caPrivateKey = "ca_private_key"
+ caPublicKeyStoragePath = "config/ca_public_key"
+ caPublicKeyStoragePathDeprecated = "public_key"
+ caPrivateKeyStoragePath = "config/ca_private_key"
+ caPrivateKeyStoragePathDeprecated = "config/ca_bundle"
+)
+
+type keyStorageEntry struct {
+ Key string `json:"key" structs:"key" mapstructure:"key"`
+}
+
+func pathConfigCA(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/ca",
+ Fields: map[string]*framework.FieldSchema{
+ "private_key": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Private half of the SSH key that will be used to sign certificates.`,
+ },
+ "public_key": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Public half of the SSH key that will be used to sign certificates.`,
+ },
+ "generate_signing_key": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `Generate SSH key pair internally rather than use the private_key and public_key fields.`,
+ Default: true,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathConfigCAUpdate,
+ logical.DeleteOperation: b.pathConfigCADelete,
+ logical.ReadOperation: b.pathConfigCARead,
+ },
+
+ HelpSynopsis: `Set the SSH private key used for signing certificates.`,
+ HelpDescription: `This sets the CA information used for certificates generated by
+this mount. The fields must be in the standard private and public SSH format.
+
+For security reasons, the private key cannot be retrieved later.
+
+Read operations will return the public key, if already stored/generated.`,
+ }
+}
+
+func (b *backend) pathConfigCARead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ publicKeyEntry, err := caKey(req.Storage, caPublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read CA public key: %v", err)
+ }
+
+ if publicKeyEntry == nil {
+ return logical.ErrorResponse("keys haven't been configured yet"), nil
+ }
+
+ response := &logical.Response{
+ Data: map[string]interface{}{
+ "public_key": publicKeyEntry.Key,
+ },
+ }
+
+ return response, nil
+}
+
+func (b *backend) pathConfigCADelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ if err := req.Storage.Delete(caPrivateKeyStoragePath); err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Delete(caPublicKeyStoragePath); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func caKey(storage logical.Storage, keyType string) (*keyStorageEntry, error) {
+ var path, deprecatedPath string
+ switch keyType {
+ case caPrivateKey:
+ path = caPrivateKeyStoragePath
+ deprecatedPath = caPrivateKeyStoragePathDeprecated
+ case caPublicKey:
+ path = caPublicKeyStoragePath
+ deprecatedPath = caPublicKeyStoragePathDeprecated
+ default:
+ return nil, fmt.Errorf("unrecognized key type %q", keyType)
+ }
+
+ entry, err := storage.Get(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read CA key of type %q: %v", keyType, err)
+ }
+
+ if entry == nil {
+ // If the entry is not found, look at an older path. If found, upgrade
+ // it.
+ entry, err = storage.Get(deprecatedPath)
+ if err != nil {
+ return nil, err
+ }
+ if entry != nil {
+ entry, err = logical.StorageEntryJSON(path, keyStorageEntry{
+ Key: string(entry.Value),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := storage.Put(entry); err != nil {
+ return nil, err
+ }
+ if err = storage.Delete(deprecatedPath); err != nil {
+ return nil, err
+ }
+ }
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var keyEntry keyStorageEntry
+ if err := entry.DecodeJSON(&keyEntry); err != nil {
+ return nil, err
+ }
+
+ return &keyEntry, nil
+}
+
+func (b *backend) pathConfigCAUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var err error
+ publicKey := data.Get("public_key").(string)
+ privateKey := data.Get("private_key").(string)
+
+ var generateSigningKey bool
+
+ generateSigningKeyRaw, ok := data.GetOk("generate_signing_key")
+ switch {
+ // explicitly set true
+ case ok && generateSigningKeyRaw.(bool):
+ if publicKey != "" || privateKey != "" {
+ return logical.ErrorResponse("public_key and private_key must not be set when generate_signing_key is set to true"), nil
+ }
+
+ generateSigningKey = true
+
+ // explicitly set to false, or not set and we have both a public and private key
+ case ok, publicKey != "" && privateKey != "":
+ if publicKey == "" {
+ return logical.ErrorResponse("missing public_key"), nil
+ }
+
+ if privateKey == "" {
+ return logical.ErrorResponse("missing private_key"), nil
+ }
+
+ _, err := ssh.ParsePrivateKey([]byte(privateKey))
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Unable to parse private_key as an SSH private key: %v", err)), nil
+ }
+
+ _, err = parsePublicSSHKey(publicKey)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Unable to parse public_key as an SSH public key: %v", err)), nil
+ }
+
+ // not set and no public/private key provided so generate
+ case publicKey == "" && privateKey == "":
+ generateSigningKey = true
+
+ // not set, but one or the other supplied
+ default:
+ return logical.ErrorResponse("only one of public_key and private_key set; both must be set to use, or both must be blank to auto-generate"), nil
+ }
+
+ if generateSigningKey {
+ publicKey, privateKey, err = generateSSHKeyPair()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if publicKey == "" || privateKey == "" {
+ return nil, fmt.Errorf("failed to generate or parse the keys")
+ }
+
+ publicKeyEntry, err := caKey(req.Storage, caPublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read CA public key: %v", err)
+ }
+
+ privateKeyEntry, err := caKey(req.Storage, caPrivateKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read CA private key: %v", err)
+ }
+
+ if (publicKeyEntry != nil && publicKeyEntry.Key != "") || (privateKeyEntry != nil && privateKeyEntry.Key != "") {
+ return nil, fmt.Errorf("keys are already configured; delete them before reconfiguring")
+ }
+
+ entry, err := logical.StorageEntryJSON(caPublicKeyStoragePath, &keyStorageEntry{
+ Key: publicKey,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Save the public key
+ err = req.Storage.Put(entry)
+ if err != nil {
+ return nil, err
+ }
+
+ entry, err = logical.StorageEntryJSON(caPrivateKeyStoragePath, &keyStorageEntry{
+ Key: privateKey,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Save the private key
+ err = req.Storage.Put(entry)
+ if err != nil {
+ var mErr *multierror.Error
+
+ mErr = multierror.Append(mErr, fmt.Errorf("failed to store CA private key: %v", err))
+
+ // If storing private key fails, the corresponding public key should be
+ // removed
+ if delErr := req.Storage.Delete(caPublicKeyStoragePath); delErr != nil {
+ mErr = multierror.Append(mErr, fmt.Errorf("failed to cleanup CA public key: %v", delErr))
+ return nil, mErr
+ }
+
+ return nil, err
+ }
+
+ if generateSigningKey {
+ response := &logical.Response{
+ Data: map[string]interface{}{
+ "public_key": publicKey,
+ },
+ }
+
+ return response, nil
+ }
+
+ return nil, nil
+}
+
+func generateSSHKeyPair() (string, string, error) {
+ privateSeed, err := rsa.GenerateKey(rand.Reader, 4096)
+ if err != nil {
+ return "", "", err
+ }
+
+ privateBlock := &pem.Block{
+ Type: "RSA PRIVATE KEY",
+ Headers: nil,
+ Bytes: x509.MarshalPKCS1PrivateKey(privateSeed),
+ }
+
+ public, err := ssh.NewPublicKey(&privateSeed.PublicKey)
+ if err != nil {
+ return "", "", err
+ }
+
+ return string(ssh.MarshalAuthorizedKey(public)), string(pem.EncodeToMemory(privateBlock)), nil
+}
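
To see the handler above from the client side, here is a minimal sketch using
the hashicorp/vault API client. It assumes the backend is mounted at "ssh/"
and that VAULT_ADDR and VAULT_TOKEN are set in the environment; the mount
point is an assumption, not something this file defines.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Ask the backend to generate the signing key pair itself. Per the
	// handler above, only the public half is returned and the private
	// key can never be read back.
	secret, err := client.Logical().Write("ssh/config/ca", map[string]interface{}{
		"generate_signing_key": true,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["public_key"])
}
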
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go
new file mode 100644
index 0000000..cc0b17b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go
@@ -0,0 +1,163 @@
+package ssh
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestSSH_ConfigCAStorageUpgrade(t *testing.T) {
+ var err error
+
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Store at an older path
+ err = config.StorageView.Put(&logical.StorageEntry{
+ Key: caPrivateKeyStoragePathDeprecated,
+ Value: []byte(privateKey),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Reading it should return the key as well as upgrade the storage path
+ privateKeyEntry, err := caKey(config.StorageView, caPrivateKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if privateKeyEntry == nil || privateKeyEntry.Key == "" {
+ t.Fatalf("failed to read the stored private key")
+ }
+
+ entry, err := config.StorageView.Get(caPrivateKeyStoragePathDeprecated)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry != nil {
+ t.Fatalf("bad: expected a nil entry after upgrade")
+ }
+
+ entry, err = config.StorageView.Get(caPrivateKeyStoragePath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ t.Fatalf("bad: expected a non-nil entry after upgrade")
+ }
+
+ // Store at an older path
+ err = config.StorageView.Put(&logical.StorageEntry{
+ Key: caPublicKeyStoragePathDeprecated,
+ Value: []byte(publicKey),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Reading it should return the key as well as upgrade the storage path
+ publicKeyEntry, err := caKey(config.StorageView, caPublicKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if publicKeyEntry == nil || publicKeyEntry.Key == "" {
+ t.Fatalf("failed to read the stored public key")
+ }
+
+ entry, err = config.StorageView.Get(caPublicKeyStoragePathDeprecated)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry != nil {
+ t.Fatalf("bad: expected a nil entry after upgrade")
+ }
+
+ entry, err = config.StorageView.Get(caPublicKeyStoragePath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ t.Fatalf("bad: expected a non-nil entry after upgrade")
+ }
+}
+
+func TestSSH_ConfigCAUpdateDelete(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatalf("Cannot create backend: %s", err)
+ }
+
+ caReq := &logical.Request{
+ Path: "config/ca",
+ Operation: logical.UpdateOperation,
+ Storage: config.StorageView,
+ }
+
+ // Auto-generate the keys
+ resp, err = b.HandleRequest(caReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v, resp:%v", err, resp)
+ }
+
+ // Fail to overwrite it
+ resp, err = b.HandleRequest(caReq)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+
+ caReq.Operation = logical.DeleteOperation
+ // Delete the configured keys
+ resp, err = b.HandleRequest(caReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v, resp:%v", err, resp)
+ }
+
+ caReq.Operation = logical.UpdateOperation
+ caReq.Data = map[string]interface{}{
+ "public_key": publicKey,
+ "private_key": privateKey,
+ }
+
+ // Successfully create a new one
+ resp, err = b.HandleRequest(caReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v, resp:%v", err, resp)
+ }
+
+ // Fail to overwrite it
+ resp, err = b.HandleRequest(caReq)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+
+ caReq.Operation = logical.DeleteOperation
+ // Delete the configured keys
+ resp, err = b.HandleRequest(caReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v, resp:%v", err, resp)
+ }
+
+ caReq.Operation = logical.UpdateOperation
+ caReq.Data = nil
+
+ // Successfully create a new one
+ resp, err = b.HandleRequest(caReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %v, resp:%v", err, resp)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_zeroaddress.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_zeroaddress.go
new file mode 100644
index 0000000..ab6b838
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_zeroaddress.go
@@ -0,0 +1,183 @@
+package ssh
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// Structure to hold roles that are allowed to accept any IP address.
+type zeroAddressRoles struct {
+ Roles []string `json:"roles" mapstructure:"roles"`
+}
+
+func pathConfigZeroAddress(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/zeroaddress",
+ Fields: map[string]*framework.FieldSchema{
+ "roles": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `[Required] Comma separated list of role names which
+ allow credentials to be requested for any IP address. CIDR blocks
+ previously registered under these roles will be ignored.`,
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathConfigZeroAddressWrite,
+ logical.ReadOperation: b.pathConfigZeroAddressRead,
+ logical.DeleteOperation: b.pathConfigZeroAddressDelete,
+ },
+ HelpSynopsis: pathConfigZeroAddressSyn,
+ HelpDescription: pathConfigZeroAddressDesc,
+ }
+}
+
+func (b *backend) pathConfigZeroAddressDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("config/zeroaddress")
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func (b *backend) pathConfigZeroAddressRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entry, err := b.getZeroAddressRoles(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "roles": entry.Roles,
+ },
+ }, nil
+}
+
+func (b *backend) pathConfigZeroAddressWrite(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ roleNames := d.Get("roles").(string)
+ if roleNames == "" {
+ return logical.ErrorResponse("Missing roles"), nil
+ }
+
+ // Check if the roles listed actually exist in the backend
+ roles := strings.Split(roleNames, ",")
+ for _, item := range roles {
+ role, err := b.getRole(req.Storage, item)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("Role %q does not exist", item)), nil
+ }
+ }
+
+ err := b.putZeroAddressRoles(req.Storage, roles)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// Stores the given list of roles at zeroaddress endpoint
+func (b *backend) putZeroAddressRoles(s logical.Storage, roles []string) error {
+ entry, err := logical.StorageEntryJSON("config/zeroaddress", &zeroAddressRoles{
+ Roles: roles,
+ })
+ if err != nil {
+ return err
+ }
+ if err := s.Put(entry); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Retrieves the list of roles from the zeroaddress endpoint.
+func (b *backend) getZeroAddressRoles(s logical.Storage) (*zeroAddressRoles, error) {
+ entry, err := s.Get("config/zeroaddress")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result zeroAddressRoles
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+// Removes a role from the list of roles present in config/zeroaddress path
+func (b *backend) removeZeroAddressRole(s logical.Storage, roleName string) error {
+ zeroAddressEntry, err := b.getZeroAddressRoles(s)
+ if err != nil {
+ return err
+ }
+ if zeroAddressEntry == nil {
+ return nil
+ }
+
+ err = zeroAddressEntry.remove(roleName)
+ if err != nil {
+ return err
+ }
+
+ return b.putZeroAddressRoles(s, zeroAddressEntry.Roles)
+}
+
+// Removes a given role from the list of roles. Removing a role that is not
+// present in the list is a no-op.
+func (r *zeroAddressRoles) remove(roleName string) error {
+ // -1 marks the role as not found; with a plain zero value, a missing
+ // role would silently remove the first entry instead.
+ index := -1
+ for i, role := range r.Roles {
+ if role == roleName {
+ index = i
+ break
+ }
+ }
+ if index == -1 {
+ return nil
+ }
+ length := len(r.Roles)
+ // If slice has zero or one item, remove the item by setting slice to nil.
+ if length < 2 {
+ r.Roles = nil
+ return nil
+ }
+
+ // Last item to be deleted
+ if length-1 == index {
+ r.Roles = r.Roles[:length-1]
+ return nil
+ }
+
+ // Delete the item by appending all items except the one at index
+ r.Roles = append(r.Roles[:index], r.Roles[index+1:]...)
+ return nil
+}
+
+const pathConfigZeroAddressSyn = `
+Assign zero address as default CIDR block for select roles.
+`
+
+const pathConfigZeroAddressDesc = `
+An administrator can allow a select few registered roles to accept any IP
+address, overriding the CIDR blocks registered during creation of the roles.
+This doesn't mean that credentials are created for any IP address; clients
+who have access to these roles are trusted to make valid requests. Access to
+these roles should be controlled using Vault policies. It is recommended that
+all roles which are allowed to accept any IP address have an explicit deny
+policy for unintended clients.
+
+This is a root authenticated endpoint. If the backend is mounted at 'ssh', use
+the endpoint 'ssh/config/zeroaddress' to provide the list of allowed roles.
+After mounting the backend, use 'path-help' for additional information.
+`
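
A short client-side sketch of this endpoint, again via the hashicorp/vault
API client; the mount point "ssh/" and the role names "web" and "db" are
illustrative, and the roles must already exist, as the write handler above
verifies.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Allow the existing roles "web" and "db" to accept any IP address.
	if _, err := client.Logical().Write("ssh/config/zeroaddress", map[string]interface{}{
		"roles": "web,db",
	}); err != nil {
		log.Fatal(err)
	}

	// Read the list back; the response mirrors pathConfigZeroAddressRead above.
	secret, err := client.Logical().Read("ssh/config/zeroaddress")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["roles"])
}
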
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go
new file mode 100644
index 0000000..e2b1e0c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go
@@ -0,0 +1,326 @@
+package ssh
+
+import (
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+type sshOTP struct {
+ Username string `json:"username" structs:"username" mapstructure:"username"`
+ IP string `json:"ip" structs:"ip" mapstructure:"ip"`
+ RoleName string `json:"role_name" structs:"role_name" mapstructure:"role_name"`
+}
+
+func pathCredsCreate(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "creds/" + framework.GenericNameRegex("role"),
+ Fields: map[string]*framework.FieldSchema{
+ "role": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "[Required] Name of the role",
+ },
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "[Optional] Username in remote host",
+ },
+ "ip": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "[Required] IP of the remote host",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathCredsCreateWrite,
+ },
+ HelpSynopsis: pathCredsCreateHelpSyn,
+ HelpDescription: pathCredsCreateHelpDesc,
+ }
+}
+
+func (b *backend) pathCredsCreateWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ roleName := d.Get("role").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("Missing role"), nil
+ }
+
+ ipRaw := d.Get("ip").(string)
+ if ipRaw == "" {
+ return logical.ErrorResponse("Missing ip"), nil
+ }
+
+ role, err := b.getRole(req.Storage, roleName)
+ if err != nil {
+ return nil, fmt.Errorf("error retrieving role: %v", err)
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("Role %q not found", roleName)), nil
+ }
+
+ // username is an optional parameter.
+ username := d.Get("username").(string)
+
+ // Set the default username
+ if username == "" {
+ if role.DefaultUser == "" {
+ return logical.ErrorResponse("No default username registered. Use 'username' option"), nil
+ }
+ username = role.DefaultUser
+ }
+
+ if role.AllowedUsers != "" {
+ // Check if the username is present in allowed users list.
+ err := validateUsername(username, role.AllowedUsers)
+
+ // If username is not present in allowed users list, check if it
+ // is the default username in the role. If neither is true, then
+ // that username is not allowed to generate a credential.
+ if err != nil && username != role.DefaultUser {
+ return logical.ErrorResponse("Username is not present is allowed users list"), nil
+ }
+ } else if username != role.DefaultUser {
+ return logical.ErrorResponse("Username has to be either in allowed users list or has to be a default username"), nil
+ }
+
+ // Validate the IP address
+ ipAddr := net.ParseIP(ipRaw)
+ if ipAddr == nil {
+ return logical.ErrorResponse(fmt.Sprintf("Invalid IP %q", ipRaw)), nil
+ }
+
+ // Check if the IP belongs to the registered list of CIDR blocks under the role
+ ip := ipAddr.String()
+
+ zeroAddressEntry, err := b.getZeroAddressRoles(req.Storage)
+ if err != nil {
+ return nil, fmt.Errorf("error retrieving zero-address roles: %v", err)
+ }
+ var zeroAddressRoles []string
+ if zeroAddressEntry != nil {
+ zeroAddressRoles = zeroAddressEntry.Roles
+ }
+
+ err = validateIP(ip, roleName, role.CIDRList, role.ExcludeCIDRList, zeroAddressRoles)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Error validating IP: %v", err)), nil
+ }
+
+ var result *logical.Response
+ if role.KeyType == KeyTypeOTP {
+ // Generate an OTP
+ otp, err := b.GenerateOTPCredential(req, &sshOTP{
+ Username: username,
+ IP: ip,
+ RoleName: roleName,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the information relevant to user of OTP type and save
+ // the data required for later use in the internal section of secret.
+ // In this case, saving just the OTP is sufficient since there is
+ // no need to establish connection with the remote host.
+ result = b.Secret(SecretOTPType).Response(map[string]interface{}{
+ "key_type": role.KeyType,
+ "key": otp,
+ "username": username,
+ "ip": ip,
+ "port": role.Port,
+ }, map[string]interface{}{
+ "otp": otp,
+ })
+ } else if role.KeyType == KeyTypeDynamic {
+ // Generate an RSA key pair. This also installs the newly generated
+ // public key in the remote host.
+ dynamicPublicKey, dynamicPrivateKey, err := b.GenerateDynamicCredential(req, role, username, ip)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the information relevant to user of dynamic type and save
+ // information required for later use in internal section of secret.
+ result = b.Secret(SecretDynamicKeyType).Response(map[string]interface{}{
+ "key": dynamicPrivateKey,
+ "key_type": role.KeyType,
+ "username": username,
+ "ip": ip,
+ "port": role.Port,
+ }, map[string]interface{}{
+ "admin_user": role.AdminUser,
+ "username": username,
+ "ip": ip,
+ "host_key_name": role.KeyName,
+ "dynamic_public_key": dynamicPublicKey,
+ "port": role.Port,
+ "install_script": role.InstallScript,
+ })
+ } else {
+ return nil, fmt.Errorf("key type unknown")
+ }
+
+ return result, nil
+}
+
+// Generates an RSA key pair and installs the public key on the remote target
+func (b *backend) GenerateDynamicCredential(req *logical.Request, role *sshRole, username, ip string) (string, string, error) {
+ // Fetch the host key to be used for dynamic key installation
+ keyEntry, err := req.Storage.Get(fmt.Sprintf("keys/%s", role.KeyName))
+ if err != nil {
+ return "", "", fmt.Errorf("key %q not found. err: %v", role.KeyName, err)
+ }
+
+ if keyEntry == nil {
+ return "", "", fmt.Errorf("key %q not found", role.KeyName)
+ }
+
+ var hostKey sshHostKey
+ if err := keyEntry.DecodeJSON(&hostKey); err != nil {
+ return "", "", fmt.Errorf("error reading the host key: %v", err)
+ }
+
+ // Generate a new RSA key pair with the given key length.
+ dynamicPublicKey, dynamicPrivateKey, err := generateRSAKeys(role.KeyBits)
+ if err != nil {
+ return "", "", fmt.Errorf("error generating key: %v", err)
+ }
+
+ if len(role.KeyOptionSpecs) != 0 {
+ dynamicPublicKey = fmt.Sprintf("%s %s", role.KeyOptionSpecs, dynamicPublicKey)
+ }
+
+ // Add the public key to authorized_keys file in target machine
+ err = b.installPublicKeyInTarget(role.AdminUser, username, ip, role.Port, hostKey.Key, dynamicPublicKey, role.InstallScript, true)
+ if err != nil {
+ return "", "", fmt.Errorf("failed to add public key to authorized_keys file in target: %v", err)
+ }
+ return dynamicPublicKey, dynamicPrivateKey, nil
+}
+
+// Generates a UUID OTP and its salted value based on the salt of the backend.
+func (b *backend) GenerateSaltedOTP() (string, string, error) {
+ str, err := uuid.GenerateUUID()
+ if err != nil {
+ return "", "", err
+ }
+ return str, b.salt.SaltID(str), nil
+}
+
+// Generates a UUID OTP and creates an entry for it in the storage backend, keyed by its salted string.
+func (b *backend) GenerateOTPCredential(req *logical.Request, sshOTPEntry *sshOTP) (string, error) {
+ otp, otpSalted, err := b.GenerateSaltedOTP()
+ if err != nil {
+ return "", err
+ }
+
+ // Check if there is an entry already created for the newly generated OTP.
+ entry, err := b.getOTP(req.Storage, otpSalted)
+
+ // If an entry already exists for the OTP, keep generating new OTPs
+ // until an unused one is found, so that an existing entry is never
+ // replaced. A collision is very unlikely; this code is just for safety.
+ for err == nil && entry != nil {
+ otp, otpSalted, err = b.GenerateSaltedOTP()
+ if err != nil {
+ return "", err
+ }
+ entry, err = b.getOTP(req.Storage, otpSalted)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ // Store an entry for the salt of OTP.
+ newEntry, err := logical.StorageEntryJSON("otp/"+otpSalted, sshOTPEntry)
+ if err != nil {
+ return "", err
+ }
+ if err := req.Storage.Put(newEntry); err != nil {
+ return "", err
+ }
+ return otp, nil
+}
+
+// validateIP first checks if the role belongs to the list of privileged
+// roles that could allow any IP address and if there is a match, IP is
+// accepted immediately. If not, IP is searched in the allowed CIDR blocks
+// registered with the role. If there is a match, then it is searched in the
+// excluded CIDR blocks and if IP is found there as well, an error is returned.
+// IP is valid only if it is encompassed by allowed CIDR blocks and not by
+// excluded CIDR blocks.
+func validateIP(ip, roleName, cidrList, excludeCidrList string, zeroAddressRoles []string) error {
+ // Search IP in the zero-address list
+ for _, role := range zeroAddressRoles {
+ if roleName == role {
+ return nil
+ }
+ }
+
+ // Search IP in allowed CIDR blocks
+ ipMatched, err := cidrListContainsIP(ip, cidrList)
+ if err != nil {
+ return err
+ }
+ if !ipMatched {
+ return fmt.Errorf("IP does not belong to role")
+ }
+
+ if len(excludeCidrList) == 0 {
+ return nil
+ }
+
+ // Search IP in exclude list
+ ipMatched, err = cidrListContainsIP(ip, excludeCidrList)
+ if err != nil {
+ return err
+ }
+ if ipMatched {
+ return fmt.Errorf("IP does not belong to role")
+ }
+
+ return nil
+}
+
+// Checks if the username supplied by the user is present in the list of
+// allowed users registered during creation of the role.
+func validateUsername(username, allowedUsers string) error {
+ if allowedUsers == "" {
+ return fmt.Errorf("username not in allowed users list")
+ }
+
+ // Role was explicitly configured to allow any username.
+ if allowedUsers == "*" {
+ return nil
+ }
+
+ userList := strings.Split(allowedUsers, ",")
+ for _, user := range userList {
+ if strings.TrimSpace(user) == username {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("username not in allowed users list")
+}
+
+const pathCredsCreateHelpSyn = `
+Creates a credential for establishing SSH connection with the remote host.
+`
+
+const pathCredsCreateHelpDesc = `
+This path will generate a new key for establishing an SSH session with the
+target host. The key can either be a long-lived dynamic key or a One-Time
+Password (OTP), depending on whether the role's 'key_type' parameter is
+'dynamic' or 'otp'. For dynamic keys, a named key should be supplied.
+Create the named key using the 'keys/' endpoint; it represents the
+shared SSH key of the target host. If this backend is mounted at 'ssh',
+then "ssh/creds/web" would generate a key for the 'web' role.
+
+Keys will have a lease associated with them and can be revoked by using
+the lease ID.
+`
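
From the client's perspective, requesting a credential from this endpoint
looks like the sketch below. The mount point "ssh/", the role name "web" and
the IP are illustrative; "username" is omitted here, so the role's
default_user is used, as in the handler above.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Write("ssh/creds/web", map[string]interface{}{
		"ip": "10.0.0.5",
	})
	if err != nil {
		log.Fatal(err)
	}

	// For an OTP role, "key" holds the one-time password; for a dynamic
	// role it holds the generated private key.
	fmt.Println(secret.Data["key_type"], secret.Data["key"])
}
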
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_fetch.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_fetch.go
new file mode 100644
index 0000000..1b59794
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_fetch.go
@@ -0,0 +1,39 @@
+package ssh
+
+import (
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathFetchPublicKey(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `public_key`,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathFetchPublicKey,
+ },
+
+ HelpSynopsis: `Retrieve the public key.`,
+ HelpDescription: `This allows the public key that this backend has been configured with to be fetched.`,
+ }
+}
+
+func (b *backend) pathFetchPublicKey(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ publicKeyEntry, err := caKey(req.Storage, caPublicKey)
+ if err != nil {
+ return nil, err
+ }
+ if publicKeyEntry == nil || publicKeyEntry.Key == "" {
+ return nil, nil
+ }
+
+ response := &logical.Response{
+ Data: map[string]interface{}{
+ logical.HTTPContentType: "text/plain",
+ logical.HTTPRawBody: []byte(publicKeyEntry.Key),
+ logical.HTTPStatusCode: 200,
+ },
+ }
+
+ return response, nil
+}
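
Since the handler returns the key as a raw text/plain body, it can be fetched
with a plain HTTP GET, as sketched below. The address and the "ssh" mount
point are illustrative, and whether a token is required depends on how the
backend declares its unauthenticated paths, which is outside this file.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("https://vault.example.com:8200/v1/ssh/public_key")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The body is the CA public key in authorized_keys format.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
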
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_keys.go
new file mode 100644
index 0000000..aa0f444
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_keys.go
@@ -0,0 +1,109 @@
+package ssh
+
+import (
+ "fmt"
+
+ "golang.org/x/crypto/ssh"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+type sshHostKey struct {
+ Key string `json:"key"`
+}
+
+func pathKeys(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "keys/" + framework.GenericNameRegex("key_name"),
+ Fields: map[string]*framework.FieldSchema{
+ "key_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "[Required] Name of the key",
+ },
+ "key": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "[Required] SSH private key with super user privileges in host",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathKeysWrite,
+ logical.DeleteOperation: b.pathKeysDelete,
+ },
+ HelpSynopsis: pathKeysSyn,
+ HelpDescription: pathKeysDesc,
+ }
+}
+
+func (b *backend) getKey(s logical.Storage, n string) (*sshHostKey, error) {
+ entry, err := s.Get("keys/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result sshHostKey
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+func (b *backend) pathKeysDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ keyName := d.Get("key_name").(string)
+ keyPath := fmt.Sprintf("keys/%s", keyName)
+ err := req.Storage.Delete(keyPath)
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func (b *backend) pathKeysWrite(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ keyName := d.Get("key_name").(string)
+ if keyName == "" {
+ return logical.ErrorResponse("Missing key_name"), nil
+ }
+
+ keyString := d.Get("key").(string)
+
+ // Check if the key provided is infact a private key
+ signer, err := ssh.ParsePrivateKey([]byte(keyString))
+ if err != nil || signer == nil {
+ return logical.ErrorResponse("Invalid key"), nil
+ }
+
+ if keyString == "" {
+ return logical.ErrorResponse("Missing key"), nil
+ }
+
+ keyPath := fmt.Sprintf("keys/%s", keyName)
+
+ // Store the key
+ entry, err := logical.StorageEntryJSON(keyPath, map[string]interface{}{
+ "key": keyString,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+const pathKeysSyn = `
+Register a shared private key with Vault.
+`
+
+const pathKeysDesc = `
+Vault uses this key to install and uninstall dynamic keys on remote hosts. The
+key should have sudoer privileges on the remote hosts, which enables installing
+keys for unprivileged usernames.
+
+If this backend is mounted as "ssh", then the endpoint for registering a shared
+key is "ssh/keys/". The name given here can be associated with any number
+of roles via the endpoint "ssh/roles/".
+`
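
Registering a shared key through this endpoint looks roughly like the sketch
below; the key name "prod-admin" and the local file name are illustrative.

package main

import (
	"io/ioutil"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// PEM-encoded private key of the admin user on the target hosts.
	pemBytes, err := ioutil.ReadFile("admin_id_rsa")
	if err != nil {
		log.Fatal(err)
	}

	if _, err := client.Logical().Write("ssh/keys/prod-admin", map[string]interface{}{
		"key": string(pemBytes),
	}); err != nil {
		log.Fatal(err)
	}
}
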
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_lookup.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_lookup.go
new file mode 100644
index 0000000..f253258
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_lookup.go
@@ -0,0 +1,90 @@
+package ssh
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathLookup(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "lookup",
+ Fields: map[string]*framework.FieldSchema{
+ "ip": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "[Required] IP address of remote host",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathLookupWrite,
+ },
+ HelpSynopsis: pathLookupSyn,
+ HelpDescription: pathLookupDesc,
+ }
+}
+
+func (b *backend) pathLookupWrite(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ ipAddr := d.Get("ip").(string)
+ if ipAddr == "" {
+ return logical.ErrorResponse("Missing ip"), nil
+ }
+ ip := net.ParseIP(ipAddr)
+ if ip == nil {
+ // Report the raw input; ip is nil at this point and would print "<nil>".
+ return logical.ErrorResponse(fmt.Sprintf("Invalid IP %q", ipAddr)), nil
+ }
+
+ // Get all the roles created in the backend.
+ keys, err := req.Storage.List("roles/")
+ if err != nil {
+ return nil, err
+ }
+
+ // Look for roles which have CIDR blocks that encompass the given IP
+ // and create a list out of them.
+ var matchingRoles []string
+ for _, role := range keys {
+ if contains, _ := roleContainsIP(req.Storage, role, ip.String()); contains {
+ matchingRoles = append(matchingRoles, role)
+ }
+ }
+
+ // Add roles that are allowed to accept any IP address.
+ zeroAddressEntry, err := b.getZeroAddressRoles(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if zeroAddressEntry != nil {
+ matchingRoles = append(matchingRoles, zeroAddressEntry.Roles...)
+ }
+
+ // This list may potentially reveal more information than it is supposed to.
+ // Roles which the client is not authorized to use will also be displayed.
+ // However, if the client tries to use a role it is not authorized for, the
+ // request will fail, so this is not a problem. In a way it can be viewed
+ // as a feature: the client can ask for permissions to be granted for a
+ // specific role if things are not working!
+ //
+ // Ideally, the role names should be filtered and only the roles which
+ // the client is authorized to see, should be returned.
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "roles": matchingRoles,
+ },
+ }, nil
+}
+
+const pathLookupSyn = `
+List all the roles associated with the given IP address.
+`
+
+const pathLookupDesc = `
+The IP address for which the key is requested is searched in the CIDR blocks
+registered with Vault using the 'roles' endpoint. Keys can be generated only by
+specifying the 'role' name. The roles that can be used to generate a key for
+a particular IP are listed via this endpoint. For example, if this backend is
+mounted at "ssh", then "ssh/lookup" lists the roles with which keys can be
+generated for a target IP, provided the CIDR block encompassing the IP is
+registered with Vault.
+`
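
A sketch of querying this endpoint from a client; the mount point "ssh/" and
the IP are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Write("ssh/lookup", map[string]interface{}{
		"ip": "10.0.0.5",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Roles whose CIDR blocks encompass the IP, plus any zero-address roles.
	fmt.Println(secret.Data["roles"])
}
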
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go
new file mode 100644
index 0000000..b905115
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go
@@ -0,0 +1,617 @@
+package ssh
+
+import (
+ "fmt"
+ "strings"
+
+ "time"
+
+ "github.com/hashicorp/vault/helper/cidrutil"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const (
+ KeyTypeOTP = "otp"
+ KeyTypeDynamic = "dynamic"
+ KeyTypeCA = "ca"
+)
+
+// Structure that represents a role in the SSH backend. This is a common role
+// structure for both OTP and Dynamic roles. Not all fields are mandatory for
+// every type; some are applicable to one and not the other.
+type sshRole struct {
+ KeyType string `mapstructure:"key_type" json:"key_type"`
+ KeyName string `mapstructure:"key" json:"key"`
+ KeyBits int `mapstructure:"key_bits" json:"key_bits"`
+ AdminUser string `mapstructure:"admin_user" json:"admin_user"`
+ DefaultUser string `mapstructure:"default_user" json:"default_user"`
+ CIDRList string `mapstructure:"cidr_list" json:"cidr_list"`
+ ExcludeCIDRList string `mapstructure:"exclude_cidr_list" json:"exclude_cidr_list"`
+ Port int `mapstructure:"port" json:"port"`
+ InstallScript string `mapstructure:"install_script" json:"install_script"`
+ AllowedUsers string `mapstructure:"allowed_users" json:"allowed_users"`
+ AllowedDomains string `mapstructure:"allowed_domains" json:"allowed_domains"`
+ KeyOptionSpecs string `mapstructure:"key_option_specs" json:"key_option_specs"`
+ MaxTTL string `mapstructure:"max_ttl" json:"max_ttl"`
+ TTL string `mapstructure:"ttl" json:"ttl"`
+ DefaultCriticalOptions map[string]string `mapstructure:"default_critical_options" json:"default_critical_options"`
+ DefaultExtensions map[string]string `mapstructure:"default_extensions" json:"default_extensions"`
+ AllowedCriticalOptions string `mapstructure:"allowed_critical_options" json:"allowed_critical_options"`
+ AllowedExtensions string `mapstructure:"allowed_extensions" json:"allowed_extensions"`
+ AllowUserCertificates bool `mapstructure:"allow_user_certificates" json:"allow_user_certificates"`
+ AllowHostCertificates bool `mapstructure:"allow_host_certificates" json:"allow_host_certificates"`
+ AllowBareDomains bool `mapstructure:"allow_bare_domains" json:"allow_bare_domains"`
+ AllowSubdomains bool `mapstructure:"allow_subdomains" json:"allow_subdomains"`
+ AllowUserKeyIDs bool `mapstructure:"allow_user_key_ids" json:"allow_user_key_ids"`
+}
+
+func pathListRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList,
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func pathRoles(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/" + framework.GenericNameRegex("role"),
+ Fields: map[string]*framework.FieldSchema{
+ "role": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Required for all types]
+ Name of the role being created.`,
+ },
+ "key": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Required for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type]
+ Name of the registered key in Vault. Before creating the role, use the
+ 'keys/' endpoint to create a named key.`,
+ },
+ "admin_user": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Required for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type]
+ Admin user at remote host. The shared key being registered should be
+ for this user and should have root privileges. Every time a dynamic
+ credential is generated for another user, Vault uses this admin
+ username to log in to the remote host and install the generated
+ credential for that user.`,
+ },
+ "default_user": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Required for Dynamic type] [Required for OTP type] [Optional for CA type]
+ Default username for which a credential will be generated.
+ When the endpoint 'creds/' is used without a username, this
+ value will be used as default username.`,
+ },
+ "cidr_list": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type]
+ Comma separated list of CIDR blocks to which the role is applicable.
+ CIDR blocks can belong to more than one role.`,
+ },
+ "exclude_cidr_list": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type]
+ Comma separated list of CIDR blocks. IP addresses belonging to these blocks are not
+ accepted by the role. This is particularly useful when big CIDR blocks are being used
+ by the role and certain parts of them need to be kept out.`,
+ },
+ "port": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `
+ [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type]
+ Port number for SSH connection. Default is '22'. The port number does not
+ play any role in the creation of an OTP. For the 'otp' type, this is just
+ a way to inform the client about the port number to use. The port number
+ will be returned to the client by the Vault server along with the OTP.`,
+ },
+ "key_type": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Required for all types]
+ Type of key used to log in to hosts. It can be either 'otp', 'dynamic' or 'ca'.
+ The 'otp' type requires an agent to be installed on remote hosts.`,
+ },
+ "key_bits": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `
+ [Optional for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type]
+ Length of the RSA dynamic key in bits. Defaults to 1024; 2048 is also allowed.`,
+ },
+ "install_script": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Optional for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type]
+ Script used to install and uninstall public keys on the target machine.
+ The built-in default install script is for Linux hosts. For a sample
+ script, refer to the project documentation website.`,
+ },
+ "allowed_users": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Optional for all types] [Works differently for CA type]
+ If this option is not specified, or is '*', the client can request a
+ credential for any valid user at the remote host, including the
+ admin user. If only certain usernames are to be allowed, then
+ this list enforces it. If this field is set, then credentials
+ can only be created for default_user and usernames present in
+ this list. Setting this option will enable all users with
+ access to this role to fetch credentials for all other usernames
+ in this list. Use with caution. N.B.: with the CA type, an empty
+ list means that no users are allowed; explicitly specify '*' to
+ allow any user.
+ `,
+ },
+ "allowed_domains": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ If this option is not specified, the client can request a signed certificate for any
+ valid host. If only certain domains are allowed, then this list enforces it.
+ `,
+ },
+ "key_option_specs": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Optional for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type]
+ Comma separated option specifications which will be prefixed to RSA key in
+ authorized_keys file. Options should be valid and comply with authorized_keys
+ file format and should not contain spaces.
+ `,
+ },
+ "ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ The lease duration if no specific lease duration is
+ requested. The lease duration controls the expiration
+ of certificates issued by this backend. Defaults to
+ the value of max_ttl.`,
+ },
+ "max_ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ The maximum allowed lease duration
+ `,
+ },
+ "allowed_critical_options": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ A comma-separated list of critical options that certificates can have when signed.
+ To allow any critical options, set this to an empty string.
+ `,
+ },
+ "allowed_extensions": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ A comma-separated list of extensions that certificates can have when signed.
+ To allow any extensions, set this to an empty string.
+ `,
+ },
+ "default_critical_options": &framework.FieldSchema{
+ Type: framework.TypeMap,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type]
+ [Optional for CA type] Critical options certificates should
+ have if none are provided when signing. This field takes in key
+ value pairs in JSON format. Note that these are not restricted
+ by "allowed_critical_options". Defaults to none.
+`,
+ },
+ "default_extensions": &framework.FieldSchema{
+ Type: framework.TypeMap,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type]
+ [Optional for CA type] Extensions certificates should have if
+ none are provided when signing. This field takes in key value
+ pairs in JSON format. Note that these are not restricted by
+ "allowed_extensions". Defaults to none.
+ `,
+ },
+ "allow_user_certificates": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ If set, certificates are allowed to be signed for use as a 'user'.
+ `,
+ Default: false,
+ },
+ "allow_host_certificates": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ If set, certificates are allowed to be signed for use as a 'host'.
+ `,
+ Default: false,
+ },
+ "allow_bare_domains": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ If set, host certificates that are requested are allowed to use the base domains listed in
+ "allowed_domains", e.g. "example.com".
+ This is a separate option as in some cases this can be considered a security threat.
+ `,
+ },
+ "allow_subdomains": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ If set, host certificates that are requested are allowed to use subdomains of those listed in "allowed_domains".
+ `,
+ },
+ "allow_user_key_ids": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ If true, users can override the key ID for a signed certificate with the "key_id" field.
+ When false, the key ID will always be the token display name.
+ The key ID is logged by the SSH server and can be useful for auditing.
+ `,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRoleRead,
+ logical.UpdateOperation: b.pathRoleWrite,
+ logical.DeleteOperation: b.pathRoleDelete,
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func (b *backend) pathRoleWrite(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ roleName := d.Get("role").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role name"), nil
+ }
+
+ // Allowed users is an optional field, applicable for both OTP and Dynamic types.
+ allowedUsers := d.Get("allowed_users").(string)
+
+ // Validate the CIDR blocks
+ cidrList := d.Get("cidr_list").(string)
+ if cidrList != "" {
+ valid, err := cidrutil.ValidateCIDRListString(cidrList, ",")
+ if err != nil {
+ return nil, fmt.Errorf("failed to validate cidr_list: %v", err)
+ }
+ if !valid {
+ return logical.ErrorResponse("failed to validate cidr_list"), nil
+ }
+ }
+
+ // Validate the excluded CIDR blocks
+ excludeCidrList := d.Get("exclude_cidr_list").(string)
+ if excludeCidrList != "" {
+ valid, err := cidrutil.ValidateCIDRListString(excludeCidrList, ",")
+ if err != nil {
+ return nil, fmt.Errorf("failed to validate exclude_cidr_list entry: %v", err)
+ }
+ if !valid {
+ return logical.ErrorResponse(fmt.Sprintf("failed to validate exclude_cidr_list entry: %v", err)), nil
+ }
+ }
+
+ port := d.Get("port").(int)
+ if port == 0 {
+ port = 22
+ }
+
+ keyType := d.Get("key_type").(string)
+ if keyType == "" {
+ return logical.ErrorResponse("missing key type"), nil
+ }
+ keyType = strings.ToLower(keyType)
+
+ var roleEntry sshRole
+ if keyType == KeyTypeOTP {
+ defaultUser := d.Get("default_user").(string)
+ if defaultUser == "" {
+ return logical.ErrorResponse("missing default user"), nil
+ }
+
+ // Admin user is not used if the OTP key type is used, because there is
+ // no need to log in to the remote machine.
+ adminUser := d.Get("admin_user").(string)
+ if adminUser != "" {
+ return logical.ErrorResponse("admin user not required for OTP type"), nil
+ }
+
+ // Below are the only fields used from the role structure for OTP type.
+ roleEntry = sshRole{
+ DefaultUser: defaultUser,
+ CIDRList: cidrList,
+ ExcludeCIDRList: excludeCidrList,
+ KeyType: KeyTypeOTP,
+ Port: port,
+ AllowedUsers: allowedUsers,
+ }
+ } else if keyType == KeyTypeDynamic {
+ defaultUser := d.Get("default_user").(string)
+ if defaultUser == "" {
+ return logical.ErrorResponse("missing default user"), nil
+ }
+ // Key name is required by dynamic type and not by OTP type.
+ keyName := d.Get("key").(string)
+ if keyName == "" {
+ return logical.ErrorResponse("missing key name"), nil
+ }
+ keyEntry, err := req.Storage.Get(fmt.Sprintf("keys/%s", keyName))
+ if err != nil || keyEntry == nil {
+ return logical.ErrorResponse(fmt.Sprintf("invalid 'key': %q", keyName)), nil
+ }
+
+ installScript := d.Get("install_script").(string)
+ keyOptionSpecs := d.Get("key_option_specs").(string)
+
+ // Setting the default script here. The script will install the
+ // generated public key in the authorized_keys file of the Linux host.
+ if installScript == "" {
+ installScript = DefaultPublicKeyInstallScript
+ }
+
+ adminUser := d.Get("admin_user").(string)
+ if adminUser == "" {
+ return logical.ErrorResponse("missing admin username"), nil
+ }
+
+ // This defaults to 1024 and it can also be 2048.
+ keyBits := d.Get("key_bits").(int)
+ if keyBits != 0 && keyBits != 1024 && keyBits != 2048 {
+ return logical.ErrorResponse("invalid key_bits field"), nil
+ }
+
+ // If user has not set this field, default it to 1024
+ if keyBits == 0 {
+ keyBits = 1024
+ }
+
+ // Store all the fields required by dynamic key type
+ roleEntry = sshRole{
+ KeyName: keyName,
+ AdminUser: adminUser,
+ DefaultUser: defaultUser,
+ CIDRList: cidrList,
+ ExcludeCIDRList: excludeCidrList,
+ Port: port,
+ KeyType: KeyTypeDynamic,
+ KeyBits: keyBits,
+ InstallScript: installScript,
+ AllowedUsers: allowedUsers,
+ KeyOptionSpecs: keyOptionSpecs,
+ }
+ } else if keyType == KeyTypeCA {
+ role, errorResponse := b.createCARole(allowedUsers, d.Get("default_user").(string), d)
+ if errorResponse != nil {
+ return errorResponse, nil
+ }
+ roleEntry = *role
+ } else {
+ return logical.ErrorResponse("invalid key type"), nil
+ }
+
+ entry, err := logical.StorageEntryJSON(fmt.Sprintf("roles/%s", roleName), roleEntry)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func (b *backend) createCARole(allowedUsers, defaultUser string, data *framework.FieldData) (*sshRole, *logical.Response) {
+ role := &sshRole{
+ MaxTTL: data.Get("max_ttl").(string),
+ TTL: data.Get("ttl").(string),
+ AllowedCriticalOptions: data.Get("allowed_critical_options").(string),
+ AllowedExtensions: data.Get("allowed_extensions").(string),
+ AllowUserCertificates: data.Get("allow_user_certificates").(bool),
+ AllowHostCertificates: data.Get("allow_host_certificates").(bool),
+ AllowedUsers: allowedUsers,
+ AllowedDomains: data.Get("allowed_domains").(string),
+ DefaultUser: defaultUser,
+ AllowBareDomains: data.Get("allow_bare_domains").(bool),
+ AllowSubdomains: data.Get("allow_subdomains").(bool),
+ AllowUserKeyIDs: data.Get("allow_user_key_ids").(bool),
+ KeyType: KeyTypeCA,
+ }
+
+ if !role.AllowUserCertificates && !role.AllowHostCertificates {
+ return nil, logical.ErrorResponse("Either 'allow_user_certificates' or 'allow_host_certificates' must be set to 'true'")
+ }
+
+ defaultCriticalOptions := convertMapToStringValue(data.Get("default_critical_options").(map[string]interface{}))
+ defaultExtensions := convertMapToStringValue(data.Get("default_extensions").(map[string]interface{}))
+
+ var maxTTL time.Duration
+ maxSystemTTL := b.System().MaxLeaseTTL()
+ if len(role.MaxTTL) == 0 {
+ maxTTL = maxSystemTTL
+ } else {
+ var err error
+ maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL)
+ if err != nil {
+ return nil, logical.ErrorResponse(fmt.Sprintf(
+ "Invalid max ttl: %s", err))
+ }
+ }
+ if maxTTL > maxSystemTTL {
+ return nil, logical.ErrorResponse("Requested max TTL is higher than backend maximum")
+ }
+
+ ttl := b.System().DefaultLeaseTTL()
+ if len(role.TTL) != 0 {
+ var err error
+ ttl, err = parseutil.ParseDurationSecond(role.TTL)
+ if err != nil {
+ return nil, logical.ErrorResponse(fmt.Sprintf(
+ "Invalid ttl: %s", err))
+ }
+ }
+ if ttl > maxTTL {
+ // If they are using the system default, cap it to the role max;
+ // if it was explicitly specified in the request, make it an error
+ if len(role.TTL) == 0 {
+ ttl = maxTTL
+ } else {
+ return nil, logical.ErrorResponse(
+ `"ttl" value must be less than "max_ttl" and/or backend default max lease TTL value`,
+ )
+ }
+ }
+
+ // Persist clamped TTLs
+ role.TTL = ttl.String()
+ role.MaxTTL = maxTTL.String()
+ role.DefaultCriticalOptions = defaultCriticalOptions
+ role.DefaultExtensions = defaultExtensions
+
+ return role, nil
+}
+
+func (b *backend) getRole(s logical.Storage, n string) (*sshRole, error) {
+ entry, err := s.Get("roles/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result sshRole
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathRoleList(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entries, err := req.Storage.List("roles/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(entries), nil
+}
+
+func (b *backend) pathRoleRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ role, err := b.getRole(req.Storage, d.Get("role").(string))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ // Return information should be based on the key type of the role
+ if role.KeyType == KeyTypeOTP {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "default_user": role.DefaultUser,
+ "cidr_list": role.CIDRList,
+ "exclude_cidr_list": role.ExcludeCIDRList,
+ "key_type": role.KeyType,
+ "port": role.Port,
+ "allowed_users": role.AllowedUsers,
+ },
+ }, nil
+ } else if role.KeyType == KeyTypeCA {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "allowed_users": role.AllowedUsers,
+ "allowed_domains": role.AllowedDomains,
+ "default_user": role.DefaultUser,
+ "max_ttl": role.MaxTTL,
+ "ttl": role.TTL,
+ "allowed_critical_options": role.AllowedCriticalOptions,
+ "allowed_extensions": role.AllowedExtensions,
+ "allow_user_certificates": role.AllowUserCertificates,
+ "allow_host_certificates": role.AllowHostCertificates,
+ "allow_bare_domains": role.AllowBareDomains,
+ "allow_subdomains": role.AllowSubdomains,
+ "allow_user_key_ids": role.AllowUserKeyIDs,
+ "key_type": role.KeyType,
+ "default_critical_options": role.DefaultCriticalOptions,
+ "default_extensions": role.DefaultExtensions,
+ },
+ }, nil
+ } else {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "key": role.KeyName,
+ "admin_user": role.AdminUser,
+ "default_user": role.DefaultUser,
+ "cidr_list": role.CIDRList,
+ "exclude_cidr_list": role.ExcludeCIDRList,
+ "port": role.Port,
+ "key_type": role.KeyType,
+ "key_bits": role.KeyBits,
+ "allowed_users": role.AllowedUsers,
+ "key_option_specs": role.KeyOptionSpecs,
+ // Returning the install script makes the output look messy, but it
+ // lets clients see the script that is used to install the key. If
+ // there is a problem with the script, clients can modify it and
+ // reconfigure the role.
+ "install_script": role.InstallScript,
+ },
+ }, nil
+ }
+}
+
+func (b *backend) pathRoleDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ roleName := d.Get("role").(string)
+
+ // If the role was given privilege to accept any IP address, there will
+ // be an entry for this role in the zero-address roles list. Before the
+ // role is removed, its entry in that list has to be removed.
+ err := b.removeZeroAddressRole(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+
+ err = req.Storage.Delete(fmt.Sprintf("roles/%s", roleName))
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+const pathRoleHelpSyn = `
+Manage the 'roles' that can be created with this backend.
+`
+
+const pathRoleHelpDesc = `
+This path allows you to manage the roles that are used to generate credentials.
+
+A role takes a 'key_type' parameter that decides what type of credential the
+role can generate. If remote hosts have the Vault SSH Agent installed, the
+'otp' type can be used; otherwise, the 'dynamic' type can be used.
+
+If the backend is mounted at "ssh" and the role is created at "ssh/roles/web",
+then a user could request a credential at "ssh/creds/web" for an IP that
+belongs to the role. The credential will be for the 'default_user' registered
+with the role. There is also an optional 'username' parameter for the 'creds/' endpoint.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go
new file mode 100644
index 0000000..b5c2e0d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go
@@ -0,0 +1,414 @@
+package ssh
+
+import (
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "golang.org/x/crypto/ssh"
+)
+
+type creationBundle struct {
+ KeyId string
+ ValidPrincipals []string
+ PublicKey ssh.PublicKey
+ CertificateType uint32
+ TTL time.Duration
+ Signer ssh.Signer
+ Role *sshRole
+ CriticalOptions map[string]string
+ Extensions map[string]string
+}
+
+func pathSign(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "sign/" + framework.GenericNameRegex("role"),
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathSign,
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ "role": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The desired role with configuration for this request.`,
+ },
+ "ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The requested Time To Live for the SSH certificate;
+sets the expiration date. If not specified
+the role default, backend default, or system
+default TTL is used, in that order. Cannot
+be later than the role max TTL.`,
+ },
+ "public_key": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `SSH public key that should be signed.`,
+ },
+ "valid_principals": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Valid principals, either usernames or hostnames, that the certificate should be signed for.`,
+ },
+ "cert_type": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Type of certificate to be created; either "user" or "host".`,
+ Default: "user",
+ },
+ "key_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Key id that the created certificate should have. If not specified, the display name of the token will be used.`,
+ },
+ "critical_options": &framework.FieldSchema{
+ Type: framework.TypeMap,
+ Description: `Critical options that the certificate should be signed for.`,
+ },
+ "extensions": &framework.FieldSchema{
+ Type: framework.TypeMap,
+ Description: `Extensions that the certificate should be signed for.`,
+ },
+ },
+
+ HelpSynopsis: `Request signing an SSH key using a certain role with the provided details.`,
+ HelpDescription: `This path allows SSH keys to be signed according to the policy of the given role.`,
+ }
+}
+
+func (b *backend) pathSign(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role").(string)
+
+ // Get the role
+ role, err := b.getRole(req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", roleName)), nil
+ }
+
+ return b.pathSignCertificate(req, data, role)
+}
+
+func (b *backend) pathSignCertificate(req *logical.Request, data *framework.FieldData, role *sshRole) (*logical.Response, error) {
+ publicKey := data.Get("public_key").(string)
+ if publicKey == "" {
+ return logical.ErrorResponse("missing public_key"), nil
+ }
+
+ userPublicKey, err := parsePublicSSHKey(publicKey)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("failed to parse public_key as SSH key: %s", err)), nil
+ }
+
+ // Note that these various functions always return "user errors" so we pass
+ // them as 4xx values
+ keyId, err := b.calculateKeyId(data, req, role, userPublicKey)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ certificateType, err := b.calculateCertificateType(data, role)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ var parsedPrincipals []string
+ if certificateType == ssh.HostCert {
+ parsedPrincipals, err = b.calculateValidPrincipals(data, "", role.AllowedDomains, validateValidPrincipalForHosts(role))
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+ } else {
+ parsedPrincipals, err = b.calculateValidPrincipals(data, role.DefaultUser, role.AllowedUsers, strutil.StrListContains)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+ }
+
+ ttl, err := b.calculateTTL(data, role)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ criticalOptions, err := b.calculateCriticalOptions(data, role)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ extensions, err := b.calculateExtensions(data, role)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ privateKeyEntry, err := caKey(req.Storage, caPrivateKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read CA private key: %v", err)
+ }
+ if privateKeyEntry == nil || privateKeyEntry.Key == "" {
+ return nil, fmt.Errorf("failed to read CA private key")
+ }
+
+ signer, err := ssh.ParsePrivateKey([]byte(privateKeyEntry.Key))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse stored CA private key: %v", err)
+ }
+
+ cBundle := creationBundle{
+ KeyId: keyId,
+ PublicKey: userPublicKey,
+ Signer: signer,
+ ValidPrincipals: parsedPrincipals,
+ TTL: ttl,
+ CertificateType: certificateType,
+ Role: role,
+ CriticalOptions: criticalOptions,
+ Extensions: extensions,
+ }
+
+ certificate, err := cBundle.sign()
+ if err != nil {
+ return nil, err
+ }
+
+ signedSSHCertificate := ssh.MarshalAuthorizedKey(certificate)
+ if len(signedSSHCertificate) == 0 {
+ return nil, fmt.Errorf("error marshaling signed certificate")
+ }
+
+ response := &logical.Response{
+ Data: map[string]interface{}{
+ "serial_number": strconv.FormatUint(certificate.Serial, 16),
+ "signed_key": string(signedSSHCertificate),
+ },
+ }
+
+ return response, nil
+}
+
+func (b *backend) calculateValidPrincipals(data *framework.FieldData, defaultPrincipal, principalsAllowedByRole string, validatePrincipal func([]string, string) bool) ([]string, error) {
+ validPrincipals := ""
+ validPrincipalsRaw, ok := data.GetOk("valid_principals")
+ if ok {
+ validPrincipals = validPrincipalsRaw.(string)
+ } else {
+ validPrincipals = defaultPrincipal
+ }
+
+ parsedPrincipals := strutil.RemoveDuplicates(strutil.ParseStringSlice(validPrincipals, ","), false)
+ allowedPrincipals := strutil.RemoveDuplicates(strutil.ParseStringSlice(principalsAllowedByRole, ","), false)
+ switch {
+ case len(parsedPrincipals) == 0:
+ // There is nothing to process
+ return nil, nil
+ case len(allowedPrincipals) == 0:
+ // User has requested principals to be set, but role is not configured
+ // with any principals
+ return nil, fmt.Errorf("role is not configured to allow any principles")
+ default:
+ // Role was explicitly configured to allow any principal.
+ if principalsAllowedByRole == "*" {
+ return parsedPrincipals, nil
+ }
+
+ for _, principal := range parsedPrincipals {
+ if !validatePrincipal(allowedPrincipals, principal) {
+ return nil, fmt.Errorf("%v is not a valid value for valid_principals", principal)
+ }
+ }
+ return parsedPrincipals, nil
+ }
+}
+
+func validateValidPrincipalForHosts(role *sshRole) func([]string, string) bool {
+ return func(allowedPrincipals []string, validPrincipal string) bool {
+ for _, allowedPrincipal := range allowedPrincipals {
+ if allowedPrincipal == validPrincipal && role.AllowBareDomains {
+ return true
+ }
+ if role.AllowSubdomains && strings.HasSuffix(validPrincipal, "."+allowedPrincipal) {
+ return true
+ }
+ }
+
+ return false
+ }
+}
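+
+// demoHostPrincipalMatching is an illustrative sketch (not part of the
+// upstream file) of how the validator above behaves: an exact match is
+// honored only when allow_bare_domains is set, while a "."-suffix match is
+// honored only when allow_subdomains is set.
+func demoHostPrincipalMatching() bool {
+ role := &sshRole{AllowBareDomains: false, AllowSubdomains: true}
+ validate := validateValidPrincipalForHosts(role)
+ // "host.example.com" is a subdomain of "example.com", so this returns
+ // true; a bare "example.com" would be rejected because AllowBareDomains
+ // is false.
+ return validate([]string{"example.com"}, "host.example.com")
+}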
+
+func (b *backend) calculateCertificateType(data *framework.FieldData, role *sshRole) (uint32, error) {
+ requestedCertificateType := data.Get("cert_type").(string)
+
+ var certificateType uint32
+ switch requestedCertificateType {
+ case "user":
+ if !role.AllowUserCertificates {
+ return 0, errors.New("cert_type 'user' is not allowed by role")
+ }
+ certificateType = ssh.UserCert
+ case "host":
+ if !role.AllowHostCertificates {
+ return 0, errors.New("cert_type 'host' is not allowed by role")
+ }
+ certificateType = ssh.HostCert
+ default:
+ return 0, errors.New("cert_type must be either 'user' or 'host'")
+ }
+
+ return certificateType, nil
+}
+
+func (b *backend) calculateKeyId(data *framework.FieldData, req *logical.Request, role *sshRole, pubKey ssh.PublicKey) (string, error) {
+ reqId := data.Get("key_id").(string)
+
+ if reqId != "" {
+ if !role.AllowUserKeyIDs {
+ return "", fmt.Errorf("setting key_id is not allowed by role")
+ }
+ return reqId, nil
+ }
+
+ keyHash := sha256.Sum256(pubKey.Marshal())
+ keyId := hex.EncodeToString(keyHash[:])
+
+ if req.DisplayName != "" {
+ keyId = fmt.Sprintf("%s-%s", req.DisplayName, keyId)
+ }
+
+ keyId = fmt.Sprintf("vault-%s", keyId)
+
+ return keyId, nil
+}
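+
+// defaultKeyIdFor is an illustrative sketch (not part of the upstream file)
+// reproducing the default key ID built above: "vault-", optionally the
+// token display name, then the hex-encoded SHA-256 of the wire-format
+// public key.
+func defaultKeyIdFor(displayName string, pubKey ssh.PublicKey) string {
+ keyHash := sha256.Sum256(pubKey.Marshal())
+ keyId := hex.EncodeToString(keyHash[:])
+ if displayName != "" {
+ keyId = fmt.Sprintf("%s-%s", displayName, keyId)
+ }
+ return fmt.Sprintf("vault-%s", keyId)
+}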
+
+func (b *backend) calculateCriticalOptions(data *framework.FieldData, role *sshRole) (map[string]string, error) {
+ unparsedCriticalOptions := data.Get("critical_options").(map[string]interface{})
+ if len(unparsedCriticalOptions) == 0 {
+ return role.DefaultCriticalOptions, nil
+ }
+
+ criticalOptions := convertMapToStringValue(unparsedCriticalOptions)
+
+ if role.AllowedCriticalOptions != "" {
+ notAllowedOptions := []string{}
+ allowedCriticalOptions := strings.Split(role.AllowedCriticalOptions, ",")
+
+ for option := range criticalOptions {
+ if !strutil.StrListContains(allowedCriticalOptions, option) {
+ notAllowedOptions = append(notAllowedOptions, option)
+ }
+ }
+
+ if len(notAllowedOptions) != 0 {
+ return nil, fmt.Errorf("Critical options not on allowed list: %v", notAllowedOptions)
+ }
+ }
+
+ return criticalOptions, nil
+}
+
+func (b *backend) calculateExtensions(data *framework.FieldData, role *sshRole) (map[string]string, error) {
+ unparsedExtensions := data.Get("extensions").(map[string]interface{})
+ if len(unparsedExtensions) == 0 {
+ return role.DefaultExtensions, nil
+ }
+
+ extensions := convertMapToStringValue(unparsedExtensions)
+
+ if role.AllowedExtensions != "" {
+ notAllowed := []string{}
+ allowedExtensions := strings.Split(role.AllowedExtensions, ",")
+
+ for extension := range extensions {
+ if !strutil.StrListContains(allowedExtensions, extension) {
+ notAllowed = append(notAllowed, extension)
+ }
+ }
+
+ if len(notAllowed) != 0 {
+ return nil, fmt.Errorf("extensions %v are not on allowed list", notAllowed)
+ }
+ }
+
+ return extensions, nil
+}
+
+func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.Duration, error) {
+
+ var ttl, maxTTL time.Duration
+ var ttlField string
+ ttlFieldInt, ok := data.GetOk("ttl")
+ if !ok {
+ ttlField = role.TTL
+ } else {
+ ttlField = ttlFieldInt.(string)
+ }
+
+ if len(ttlField) == 0 {
+ ttl = b.System().DefaultLeaseTTL()
+ } else {
+ var err error
+ ttl, err = parseutil.ParseDurationSecond(ttlField)
+ if err != nil {
+ return 0, fmt.Errorf("invalid requested ttl: %s", err)
+ }
+ }
+
+ if len(role.MaxTTL) == 0 {
+ maxTTL = b.System().MaxLeaseTTL()
+ } else {
+ var err error
+ maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL)
+ if err != nil {
+ return 0, fmt.Errorf("invalid requested max ttl: %s", err)
+ }
+ }
+
+ if ttl > maxTTL {
+ // Don't error if they were using system defaults, only error if
+ // they specifically chose a bad TTL
+ if len(ttlField) == 0 {
+ ttl = maxTTL
+ } else {
+ return 0, fmt.Errorf("ttl is larger than maximum allowed (%d)", maxTTL/time.Second)
+ }
+ }
+
+ return ttl, nil
+}
+
+func (b *creationBundle) sign() (*ssh.Certificate, error) {
+ serialNumber, err := certutil.GenerateSerialNumber()
+ if err != nil {
+ return nil, err
+ }
+
+ now := time.Now()
+
+ certificate := &ssh.Certificate{
+ Serial: serialNumber.Uint64(),
+ Key: b.PublicKey,
+ KeyId: b.KeyId,
+ ValidPrincipals: b.ValidPrincipals,
+ ValidAfter: uint64(now.Add(-30 * time.Second).In(time.UTC).Unix()),
+ ValidBefore: uint64(now.Add(b.TTL).In(time.UTC).Unix()),
+ CertType: b.CertificateType,
+ Permissions: ssh.Permissions{
+ CriticalOptions: b.CriticalOptions,
+ Extensions: b.Extensions,
+ },
+ }
+
+ err = certificate.SignCert(rand.Reader, b.Signer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate signed SSH key")
+ }
+
+ return certificate, nil
+}
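+
+// parseSignedCertificate is an illustrative sketch (not part of the upstream
+// file) showing how a client can inspect the "signed_key" value returned by
+// this path: it is an authorized_keys-format line whose key material is an
+// *ssh.Certificate.
+func parseSignedCertificate(signedKey string) (*ssh.Certificate, error) {
+ pub, _, _, _, err := ssh.ParseAuthorizedKey([]byte(signedKey))
+ if err != nil {
+ return nil, err
+ }
+ cert, ok := pub.(*ssh.Certificate)
+ if !ok {
+ return nil, fmt.Errorf("signed_key does not contain an SSH certificate")
+ }
+ return cert, nil
+}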
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go
new file mode 100644
index 0000000..9cb98ad
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go
@@ -0,0 +1,97 @@
+package ssh
+
+import (
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathVerify(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "verify",
+ Fields: map[string]*framework.FieldSchema{
+ "otp": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "[Required] One-Time-Key that needs to be validated",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathVerifyWrite,
+ },
+ HelpSynopsis: pathVerifyHelpSyn,
+ HelpDescription: pathVerifyHelpDesc,
+ }
+}
+
+func (b *backend) getOTP(s logical.Storage, n string) (*sshOTP, error) {
+ entry, err := s.Get("otp/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result sshOTP
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathVerifyWrite(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ otp := d.Get("otp").(string)
+
+ // If the OTP is not a UUID but matches the string VerifyEchoRequest, the
+ // response will be VerifyEchoResponse. The agent uses this to check that
+ // its connection to the Vault server is working.
+ if otp == api.VerifyEchoRequest {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "message": api.VerifyEchoResponse,
+ },
+ }, nil
+ }
+
+ // Salt the OTP before the lookup: the entry was stored under the salted
+ // value, not the raw OTP. Because the same backend salt is used as the
+ // seed, salting here yields the same value the entry was stored under.
+ otpSalted := b.salt.SaltID(otp)
+
+ // Return nil if there is no entry found for the OTP
+ otpEntry, err := b.getOTP(req.Storage, otpSalted)
+ if err != nil {
+ return nil, err
+ }
+ if otpEntry == nil {
+ return logical.ErrorResponse("OTP not found"), nil
+ }
+
+ // Delete the OTP if found. This is what makes the key an OTP.
+ err = req.Storage.Delete("otp/" + otpSalted)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return username and IP only if there were no problems up to this point.
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "username": otpEntry.Username,
+ "ip": otpEntry.IP,
+ "role_name": otpEntry.RoleName,
+ },
+ }, nil
+}
+
+const pathVerifyHelpSyn = `
+Validate the OTP provided by Vault SSH Agent.
+`
+
+const pathVerifyHelpDesc = `
+This path is used by the Vault SSH Agent running on remote hosts. The agent
+sends the OTP provided by the client to Vault for validation. If Vault finds
+an entry for the OTP, it responds with the username and IP it is associated
+with. The agent uses this information to authenticate the client. Vault
+deletes the OTP after validating it once.
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_dynamic_key.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_dynamic_key.go
new file mode 100644
index 0000000..bbe9df2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_dynamic_key.go
@@ -0,0 +1,71 @@
+package ssh
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/mitchellh/mapstructure"
+)
+
+const SecretDynamicKeyType = "secret_dynamic_key_type"
+
+func secretDynamicKey(b *backend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretDynamicKeyType,
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Username in host",
+ },
+ "ip": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "IP address of host",
+ },
+ },
+
+ Renew: b.secretDynamicKeyRenew,
+ Revoke: b.secretDynamicKeyRevoke,
+ }
+}
+
+func (b *backend) secretDynamicKeyRenew(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ f := framework.LeaseExtend(0, 0, b.System())
+ return f(req, d)
+}
+
+func (b *backend) secretDynamicKeyRevoke(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ type sec struct {
+ AdminUser string `mapstructure:"admin_user"`
+ Username string `mapstructure:"username"`
+ IP string `mapstructure:"ip"`
+ HostKeyName string `mapstructure:"host_key_name"`
+ DynamicPublicKey string `mapstructure:"dynamic_public_key"`
+ InstallScript string `mapstructure:"install_script"`
+ Port int `mapstructure:"port"`
+ }
+
+ intSec := &sec{}
+ err := mapstructure.Decode(req.Secret.InternalData, intSec)
+ if err != nil {
+ return nil, errwrap.Wrapf("secret internal data could not be decoded: {{err}}", err)
+ }
+
+ // Fetch the host key using the key name
+ hostKey, err := b.getKey(req.Storage, intSec.HostKeyName)
+ if err != nil {
+ return nil, fmt.Errorf("key %q not found error: %v", intSec.HostKeyName, err)
+ }
+ if hostKey == nil {
+ return nil, fmt.Errorf("key %q not found", intSec.HostKeyName)
+ }
+
+ // Remove the public key from authorized_keys file in target machine
+ // The last param 'false' indicates that the key should be uninstalled.
+ err = b.installPublicKeyInTarget(intSec.AdminUser, intSec.Username, intSec.IP, intSec.Port, hostKey.Key, intSec.DynamicPublicKey, intSec.InstallScript, false)
+ if err != nil {
+ return nil, fmt.Errorf("error removing public key from authorized_keys file in target")
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go
new file mode 100644
index 0000000..d0e4dd5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go
@@ -0,0 +1,41 @@
+package ssh
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const SecretOTPType = "secret_otp_type"
+
+func secretOTP(b *backend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretOTPType,
+ Fields: map[string]*framework.FieldSchema{
+ "otp": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "One time password",
+ },
+ },
+
+ Revoke: b.secretOTPRevoke,
+ }
+}
+
+func (b *backend) secretOTPRevoke(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ otpRaw, ok := req.Secret.InternalData["otp"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing internal data")
+ }
+ otp, ok := otpRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("secret is missing internal data")
+ }
+
+ err := req.Storage.Delete("otp/" + b.salt.SaltID(otp))
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go
new file mode 100644
index 0000000..c18ccaf
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go
@@ -0,0 +1,213 @@
+package ssh
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "fmt"
+ "net"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+
+ log "github.com/mgutz/logxi/v1"
+ "golang.org/x/crypto/ssh"
+)
+
+// Creates a new RSA key pair with the given key length. The private key is
+// returned in PEM format and the public key in OpenSSH format.
+func generateRSAKeys(keyBits int) (publicKeyRsa string, privateKeyRsa string, err error) {
+ privateKey, err := rsa.GenerateKey(rand.Reader, keyBits)
+ if err != nil {
+ return "", "", fmt.Errorf("error generating RSA key-pair: %v", err)
+ }
+
+ privateKeyRsa = string(pem.EncodeToMemory(&pem.Block{
+ Type: "RSA PRIVATE KEY",
+ Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
+ }))
+
+ sshPublicKey, err := ssh.NewPublicKey(privateKey.Public())
+ if err != nil {
+ return "", "", fmt.Errorf("error generating RSA key-pair: %v", err)
+ }
+ publicKeyRsa = "ssh-rsa " + base64.StdEncoding.EncodeToString(sshPublicKey.Marshal())
+ return
+}
+
+// The public key and the script to install it are uploaded to the remote
+// machine. The script then adds the public key to, or removes it from, the
+// authorized_keys file. The default script targets Linux, so the path of the
+// authorized_keys file is hard-coded to the Linux convention.
+//
+// If the last parameter, 'install', is false, the key is uninstalled.
+func (b *backend) installPublicKeyInTarget(adminUser, username, ip string, port int, hostkey, dynamicPublicKey, installScript string, install bool) error {
+ // Transfer the newly generated public key to remote host under a random
+ // file name. This is to avoid name collisions from other requests.
+ _, publicKeyFileName, err := b.GenerateSaltedOTP()
+ if err != nil {
+ return err
+ }
+
+ comm, err := createSSHComm(b.Logger(), adminUser, ip, port, hostkey)
+ if err != nil {
+ return err
+ }
+ defer comm.Close()
+
+ err = comm.Upload(publicKeyFileName, bytes.NewBufferString(dynamicPublicKey), nil)
+ if err != nil {
+ return fmt.Errorf("error uploading public key: %v", err)
+ }
+
+ // Transfer the script required to install or uninstall the key to the remote
+ // host under a random file name as well. This is to avoid name collisions
+ // from other requests.
+ scriptFileName := fmt.Sprintf("%s.sh", publicKeyFileName)
+ err = comm.Upload(scriptFileName, bytes.NewBufferString(installScript), nil)
+ if err != nil {
+ return fmt.Errorf("error uploading install script: %v", err)
+ }
+
+ // Create a session to run remote command that triggers the script to install
+ // or uninstall the key.
+ session, err := comm.NewSession()
+ if err != nil {
+ return fmt.Errorf("unable to create SSH Session using public keys: %v", err)
+ }
+ if session == nil {
+ return fmt.Errorf("invalid session object")
+ }
+ defer session.Close()
+
+ authKeysFileName := fmt.Sprintf("/home/%s/.ssh/authorized_keys", username)
+
+ var installOption string
+ if install {
+ installOption = "install"
+ } else {
+ installOption = "uninstall"
+ }
+
+ // Give execute permissions to install script, run and delete it.
+ chmodCmd := fmt.Sprintf("chmod +x %s", scriptFileName)
+ scriptCmd := fmt.Sprintf("./%s %s %s %s", scriptFileName, installOption, publicKeyFileName, authKeysFileName)
+ rmCmd := fmt.Sprintf("rm -f %s", scriptFileName)
+ targetCmd := fmt.Sprintf("%s;%s;%s", chmodCmd, scriptCmd, rmCmd)
+
+ session.Run(targetCmd)
+ return nil
+}
+
+// Takes an IP address and a role name and checks if the IP is part
+// of the CIDR blocks belonging to the role.
+func roleContainsIP(s logical.Storage, roleName string, ip string) (bool, error) {
+ if roleName == "" {
+ return false, fmt.Errorf("missing role name")
+ }
+
+ if ip == "" {
+ return false, fmt.Errorf("missing ip")
+ }
+
+ roleEntry, err := s.Get(fmt.Sprintf("roles/%s", roleName))
+ if err != nil {
+ return false, fmt.Errorf("error retrieving role %v", err)
+ }
+ if roleEntry == nil {
+ return false, fmt.Errorf("role %q not found", roleName)
+ }
+
+ var role sshRole
+ if err := roleEntry.DecodeJSON(&role); err != nil {
+ return false, fmt.Errorf("error decoding role %q", roleName)
+ }
+
+ matched, err := cidrListContainsIP(ip, role.CIDRList)
+ if err != nil {
+ return false, err
+ }
+ return matched, nil
+}
+
+// Returns true if the IP supplied by the user is part of the
+// comma-separated CIDR blocks
+func cidrListContainsIP(ip, cidrList string) (bool, error) {
+ if len(cidrList) == 0 {
+ return false, fmt.Errorf("IP does not belong to role")
+ }
+ for _, item := range strings.Split(cidrList, ",") {
+ _, cidrIPNet, err := net.ParseCIDR(item)
+ if err != nil {
+ return false, fmt.Errorf("invalid CIDR entry %q", item)
+ }
+ if cidrIPNet.Contains(net.ParseIP(ip)) {
+ return true, nil
+ }
+ }
+ return false, nil
+}
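+
+// demoCIDRCheck is an illustrative sketch (not part of the upstream file):
+// "10.1.2.3" falls inside the second block, so this returns true with a nil
+// error.
+func demoCIDRCheck() (bool, error) {
+ return cidrListContainsIP("10.1.2.3", "192.168.0.0/16,10.0.0.0/8")
+}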
+
+func createSSHComm(logger log.Logger, username, ip string, port int, hostkey string) (*comm, error) {
+ signer, err := ssh.ParsePrivateKey([]byte(hostkey))
+ if err != nil {
+ return nil, err
+ }
+
+ clientConfig := &ssh.ClientConfig{
+ User: username,
+ Auth: []ssh.AuthMethod{
+ ssh.PublicKeys(signer),
+ },
+ }
+
+ connfunc := func() (net.Conn, error) {
+ c, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", ip, port), 15*time.Second)
+ if err != nil {
+ return nil, err
+ }
+
+ if tcpConn, ok := c.(*net.TCPConn); ok {
+ tcpConn.SetKeepAlive(true)
+ tcpConn.SetKeepAlivePeriod(5 * time.Second)
+ }
+
+ return c, nil
+ }
+ config := &SSHCommConfig{
+ SSHConfig: clientConfig,
+ Connection: connfunc,
+ Pty: false,
+ DisableAgent: true,
+ Logger: logger,
+ }
+
+ return SSHCommNew(fmt.Sprintf("%s:%d", ip, port), config)
+}
+
+func parsePublicSSHKey(key string) (ssh.PublicKey, error) {
+ keyParts := strings.Split(key, " ")
+ if len(keyParts) > 1 {
+ // Someone has sent the 'full' public key rather than just the base64 encoded part that the ssh library wants
+ key = keyParts[1]
+ }
+
+ decodedKey, err := base64.StdEncoding.DecodeString(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return ssh.ParsePublicKey([]byte(decodedKey))
+}
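+
+// demoPublicKeyRoundTrip is an illustrative sketch (not part of the upstream
+// file): it generates a key pair with generateRSAKeys and feeds the OpenSSH
+// public key back through parsePublicSSHKey, exercising the split-and-decode
+// branch above.
+func demoPublicKeyRoundTrip() (ssh.PublicKey, error) {
+ publicKey, _, err := generateRSAKeys(2048)
+ if err != nil {
+ return nil, err
+ }
+ return parsePublicSSHKey(publicKey)
+}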
+
+func convertMapToStringValue(initial map[string]interface{}) map[string]string {
+ result := map[string]string{}
+ for key, value := range initial {
+ result[key] = fmt.Sprintf("%v", value)
+ }
+ return result
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go
new file mode 100644
index 0000000..37ebca4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go
@@ -0,0 +1,67 @@
+package transit
+
+import (
+ "strings"
+
+ "github.com/hashicorp/vault/helper/keysutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ b := Backend(conf)
+ be, err := b.Backend.Setup(conf)
+ if err != nil {
+ return nil, err
+ }
+
+ return be, nil
+}
+
+func Backend(conf *logical.BackendConfig) *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Paths: []*framework.Path{
+ // Rotate/Config needs to come before Keys
+ // as the handler is greedy
+ b.pathConfig(),
+ b.pathRotate(),
+ b.pathRewrap(),
+ b.pathKeys(),
+ b.pathListKeys(),
+ b.pathExportKeys(),
+ b.pathEncrypt(),
+ b.pathDecrypt(),
+ b.pathDatakey(),
+ b.pathRandom(),
+ b.pathHash(),
+ b.pathHMAC(),
+ b.pathSign(),
+ b.pathVerify(),
+ },
+
+ Secrets: []*framework.Secret{},
+
+ Invalidate: b.invalidate,
+ }
+
+ b.lm = keysutil.NewLockManager(conf.System.CachingDisabled())
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+ lm *keysutil.LockManager
+}
+
+func (b *backend) invalidate(key string) {
+ if b.Logger().IsTrace() {
+ b.Logger().Trace("transit: invalidating key", "key", key)
+ }
+ switch {
+ case strings.HasPrefix(key, "policy/"):
+ name := strings.TrimPrefix(key, "policy/")
+ b.lm.InvalidatePolicy(name)
+ }
+}
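+
+// Illustrative sketch (not part of the upstream file): typical use of this
+// backend through the Vault API client, assuming it is mounted at "transit"
+// and that "github.com/hashicorp/vault/api" and "encoding/base64" are
+// available; the key name "my-key" is hypothetical:
+//
+// plaintext := base64.StdEncoding.EncodeToString([]byte("secret data"))
+// enc, err := client.Logical().Write("transit/encrypt/my-key",
+// map[string]interface{}{"plaintext": plaintext})
+// // enc.Data["ciphertext"] holds a versioned value like "vault:v1:...",
+// // which the decrypt and rewrap paths above accept.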
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go
new file mode 100644
index 0000000..0f9d06f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go
@@ -0,0 +1,1039 @@
+package transit
+
+import (
+ "encoding/base64"
+ "fmt"
+ "math/rand"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ uuid "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/keysutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/mitchellh/mapstructure"
+)
+
+const (
+ testPlaintext = "the quick brown fox"
+)
+
+func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+
+ b := Backend(config)
+ if b == nil {
+ t.Fatalf("failed to create backend")
+ }
+ _, err := b.Backend.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return b, config.StorageView
+}
+
+func TestBackend_basic(t *testing.T) {
+ decryptData := make(map[string]interface{})
+ logicaltest.Test(t, logicaltest.TestCase{
+ Factory: Factory,
+ Steps: []logicaltest.TestStep{
+ testAccStepListPolicy(t, "test", true),
+ testAccStepWritePolicy(t, "test", false),
+ testAccStepListPolicy(t, "test", false),
+ testAccStepReadPolicy(t, "test", false, false),
+ testAccStepEncrypt(t, "test", testPlaintext, decryptData),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepEncrypt(t, "test", "", decryptData),
+ testAccStepDecrypt(t, "test", "", decryptData),
+ testAccStepDeleteNotDisabledPolicy(t, "test"),
+ testAccStepEnableDeletion(t, "test"),
+ testAccStepDeletePolicy(t, "test"),
+ testAccStepWritePolicy(t, "test", false),
+ testAccStepEnableDeletion(t, "test"),
+ testAccStepDisableDeletion(t, "test"),
+ testAccStepDeleteNotDisabledPolicy(t, "test"),
+ testAccStepEnableDeletion(t, "test"),
+ testAccStepDeletePolicy(t, "test"),
+ testAccStepReadPolicy(t, "test", true, false),
+ },
+ })
+}
+
+func TestBackend_upsert(t *testing.T) {
+ decryptData := make(map[string]interface{})
+ logicaltest.Test(t, logicaltest.TestCase{
+ Factory: Factory,
+ Steps: []logicaltest.TestStep{
+ testAccStepReadPolicy(t, "test", true, false),
+ testAccStepListPolicy(t, "test", true),
+ testAccStepEncryptUpsert(t, "test", testPlaintext, decryptData),
+ testAccStepListPolicy(t, "test", false),
+ testAccStepReadPolicy(t, "test", false, false),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ },
+ })
+}
+
+func TestBackend_datakey(t *testing.T) {
+ dataKeyInfo := make(map[string]interface{})
+ logicaltest.Test(t, logicaltest.TestCase{
+ Factory: Factory,
+ Steps: []logicaltest.TestStep{
+ testAccStepListPolicy(t, "test", true),
+ testAccStepWritePolicy(t, "test", false),
+ testAccStepListPolicy(t, "test", false),
+ testAccStepReadPolicy(t, "test", false, false),
+ testAccStepWriteDatakey(t, "test", false, 256, dataKeyInfo),
+ testAccStepDecryptDatakey(t, "test", dataKeyInfo),
+ testAccStepWriteDatakey(t, "test", true, 128, dataKeyInfo),
+ },
+ })
+}
+
+func TestBackend_rotation(t *testing.T) {
+ decryptData := make(map[string]interface{})
+ encryptHistory := make(map[int]map[string]interface{})
+ logicaltest.Test(t, logicaltest.TestCase{
+ Factory: Factory,
+ Steps: []logicaltest.TestStep{
+ testAccStepListPolicy(t, "test", true),
+ testAccStepWritePolicy(t, "test", false),
+ testAccStepListPolicy(t, "test", false),
+ testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 0, encryptHistory),
+ testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 1, encryptHistory),
+ testAccStepRotate(t, "test"), // now v2
+ testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 2, encryptHistory),
+ testAccStepRotate(t, "test"), // now v3
+ testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 3, encryptHistory),
+ testAccStepRotate(t, "test"), // now v4
+ testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 4, encryptHistory),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 99, encryptHistory),
+ testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData),
+ testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepLoadVX(t, "test", decryptData, 2, encryptHistory),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepLoadVX(t, "test", decryptData, 3, encryptHistory),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepLoadVX(t, "test", decryptData, 99, encryptHistory),
+ testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData),
+ testAccStepLoadVX(t, "test", decryptData, 4, encryptHistory),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepDeleteNotDisabledPolicy(t, "test"),
+ testAccStepAdjustPolicy(t, "test", 3),
+ testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory),
+ testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData),
+ testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory),
+ testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData),
+ testAccStepLoadVX(t, "test", decryptData, 2, encryptHistory),
+ testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData),
+ testAccStepLoadVX(t, "test", decryptData, 3, encryptHistory),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepLoadVX(t, "test", decryptData, 4, encryptHistory),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepAdjustPolicy(t, "test", 1),
+ testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepLoadVX(t, "test", decryptData, 2, encryptHistory),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepRewrap(t, "test", decryptData, 4),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepEnableDeletion(t, "test"),
+ testAccStepDeletePolicy(t, "test"),
+ testAccStepReadPolicy(t, "test", true, false),
+ testAccStepListPolicy(t, "test", true),
+ },
+ })
+}
+
+func TestBackend_basic_derived(t *testing.T) {
+ decryptData := make(map[string]interface{})
+ logicaltest.Test(t, logicaltest.TestCase{
+ Factory: Factory,
+ Steps: []logicaltest.TestStep{
+ testAccStepListPolicy(t, "test", true),
+ testAccStepWritePolicy(t, "test", true),
+ testAccStepListPolicy(t, "test", false),
+ testAccStepReadPolicy(t, "test", false, true),
+ testAccStepEncryptContext(t, "test", testPlaintext, "my-cool-context", decryptData),
+ testAccStepDecrypt(t, "test", testPlaintext, decryptData),
+ testAccStepEnableDeletion(t, "test"),
+ testAccStepDeletePolicy(t, "test"),
+ testAccStepReadPolicy(t, "test", true, true),
+ },
+ })
+}
+
+func testAccStepWritePolicy(t *testing.T, name string, derived bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "keys/" + name,
+ Data: map[string]interface{}{
+ "derived": derived,
+ },
+ }
+}
+
+func testAccStepListPolicy(t *testing.T, name string, expectNone bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ListOperation,
+ Path: "keys",
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ return fmt.Errorf("missing response")
+ }
+ if expectNone {
+ keysRaw, ok := resp.Data["keys"]
+ if ok || keysRaw != nil {
+ return fmt.Errorf("response data when expecting none")
+ }
+ return nil
+ }
+ if len(resp.Data) == 0 {
+ return fmt.Errorf("no data returned")
+ }
+
+ var d struct {
+ Keys []string `mapstructure:"keys"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ if len(d.Keys) > 0 && d.Keys[0] != name {
+ return fmt.Errorf("bad name: %#v", d)
+ }
+ if len(d.Keys) != 1 {
+ return fmt.Errorf("only 1 key expected, %d returned", len(d.Keys))
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepAdjustPolicy(t *testing.T, name string, minVer int) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "keys/" + name + "/config",
+ Data: map[string]interface{}{
+ "min_decryption_version": minVer,
+ },
+ }
+}
+
+func testAccStepDisableDeletion(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "keys/" + name + "/config",
+ Data: map[string]interface{}{
+ "deletion_allowed": false,
+ },
+ }
+}
+
+func testAccStepEnableDeletion(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "keys/" + name + "/config",
+ Data: map[string]interface{}{
+ "deletion_allowed": true,
+ },
+ }
+}
+
+func testAccStepDeletePolicy(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "keys/" + name,
+ }
+}
+
+func testAccStepDeleteNotDisabledPolicy(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "keys/" + name,
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ return fmt.Errorf("Got nil response instead of error")
+ }
+ if resp.IsError() {
+ return nil
+ }
+ return fmt.Errorf("expected error but did not get one")
+ },
+ }
+}
+
+func testAccStepReadPolicy(t *testing.T, name string, expectNone, derived bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "keys/" + name,
+ Check: func(resp *logical.Response) error {
+ if resp == nil && !expectNone {
+ return fmt.Errorf("missing response")
+ } else if expectNone {
+ if resp != nil {
+ return fmt.Errorf("response when expecting none")
+ }
+ return nil
+ }
+ var d struct {
+ Name string `mapstructure:"name"`
+ Key []byte `mapstructure:"key"`
+ Keys map[string]int64 `mapstructure:"keys"`
+ Type string `mapstructure:"type"`
+ Derived bool `mapstructure:"derived"`
+ KDF string `mapstructure:"kdf"`
+ DeletionAllowed bool `mapstructure:"deletion_allowed"`
+ ConvergentEncryption bool `mapstructure:"convergent_encryption"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.Name != name {
+ return fmt.Errorf("bad name: %#v", d)
+ }
+ if d.Type != keysutil.KeyType(keysutil.KeyType_AES256_GCM96).String() {
+ return fmt.Errorf("bad key type: %#v", d)
+ }
+ // Should NOT get a key back
+ if d.Key != nil {
+ return fmt.Errorf("bad: %#v", d)
+ }
+ if d.Keys == nil {
+ return fmt.Errorf("bad: %#v", d)
+ }
+ if d.DeletionAllowed == true {
+ return fmt.Errorf("bad: %#v", d)
+ }
+ if d.Derived != derived {
+ return fmt.Errorf("bad: %#v", d)
+ }
+ if derived && d.KDF != "hkdf_sha256" {
+ return fmt.Errorf("bad: %#v", d)
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepEncrypt(
+ t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "encrypt/" + name,
+ Data: map[string]interface{}{
+ "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)),
+ },
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Ciphertext string `mapstructure:"ciphertext"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ if d.Ciphertext == "" {
+ return fmt.Errorf("missing ciphertext")
+ }
+ decryptData["ciphertext"] = d.Ciphertext
+ return nil
+ },
+ }
+}
+
+func testAccStepEncryptUpsert(
+ t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/" + name,
+ Data: map[string]interface{}{
+ "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)),
+ },
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Ciphertext string `mapstructure:"ciphertext"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ if d.Ciphertext == "" {
+ return fmt.Errorf("missing ciphertext")
+ }
+ decryptData["ciphertext"] = d.Ciphertext
+ return nil
+ },
+ }
+}
+
+func testAccStepEncryptContext(
+ t *testing.T, name, plaintext, context string, decryptData map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "encrypt/" + name,
+ Data: map[string]interface{}{
+ "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)),
+ "context": base64.StdEncoding.EncodeToString([]byte(context)),
+ },
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Ciphertext string `mapstructure:"ciphertext"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ if d.Ciphertext == "" {
+ return fmt.Errorf("missing ciphertext")
+ }
+ decryptData["ciphertext"] = d.Ciphertext
+ decryptData["context"] = base64.StdEncoding.EncodeToString([]byte(context))
+ return nil
+ },
+ }
+}
+
+func testAccStepDecrypt(
+ t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/" + name,
+ Data: decryptData,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Plaintext string `mapstructure:"plaintext"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ // Decode the base64
+ plainRaw, err := base64.StdEncoding.DecodeString(d.Plaintext)
+ if err != nil {
+ return err
+ }
+
+ if string(plainRaw) != plaintext {
+ return fmt.Errorf("plaintext mismatch: %s expect: %s, decryptData was %#v", plainRaw, plaintext, decryptData)
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepRewrap(
+ t *testing.T, name string, decryptData map[string]interface{}, expectedVer int) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "rewrap/" + name,
+ Data: decryptData,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Ciphertext string `mapstructure:"ciphertext"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ if d.Ciphertext == "" {
+ return fmt.Errorf("missing ciphertext")
+ }
+ splitStrings := strings.Split(d.Ciphertext, ":")
+ verString := splitStrings[1][1:]
+ ver, err := strconv.Atoi(verString)
+ if err != nil {
+ return fmt.Errorf("Error pulling out version from verString '%s', ciphertext was %s", verString, d.Ciphertext)
+ }
+ if ver != expectedVer {
+ return fmt.Errorf("Did not get expected version")
+ }
+ decryptData["ciphertext"] = d.Ciphertext
+ return nil
+ },
+ }
+}
+
+func testAccStepEncryptVX(
+ t *testing.T, name, plaintext string, decryptData map[string]interface{},
+ ver int, encryptHistory map[int]map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "encrypt/" + name,
+ Data: map[string]interface{}{
+ "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)),
+ },
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Ciphertext string `mapstructure:"ciphertext"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ if d.Ciphertext == "" {
+ return fmt.Errorf("missing ciphertext")
+ }
+ splitStrings := strings.Split(d.Ciphertext, ":")
+ splitStrings[1] = "v" + strconv.Itoa(ver)
+ ciphertext := strings.Join(splitStrings, ":")
+ decryptData["ciphertext"] = ciphertext
+ encryptHistory[ver] = map[string]interface{}{
+ "ciphertext": ciphertext,
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepLoadVX(
+ t *testing.T, name string, decryptData map[string]interface{},
+ ver int, encryptHistory map[int]map[string]interface{}) logicaltest.TestStep {
+ // This is really a no-op to allow us to do data manipulation in the check function
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "keys/" + name,
+ Check: func(resp *logical.Response) error {
+ decryptData["ciphertext"] = encryptHistory[ver]["ciphertext"].(string)
+ return nil
+ },
+ }
+}
+
+func testAccStepDecryptExpectFailure(
+ t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/" + name,
+ Data: decryptData,
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if !resp.IsError() {
+ return fmt.Errorf("expected error")
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepRotate(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "keys/" + name + "/rotate",
+ }
+}
+
+func testAccStepWriteDatakey(t *testing.T, name string,
+ noPlaintext bool, bits int,
+ dataKeyInfo map[string]interface{}) logicaltest.TestStep {
+ data := map[string]interface{}{}
+ subPath := "plaintext"
+ if noPlaintext {
+ subPath = "wrapped"
+ }
+ if bits != 256 {
+ data["bits"] = bits
+ }
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "datakey/" + subPath + "/" + name,
+ Data: data,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Plaintext string `mapstructure:"plaintext"`
+ Ciphertext string `mapstructure:"ciphertext"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+ if noPlaintext && len(d.Plaintext) != 0 {
+ return fmt.Errorf("received plaintxt when we disabled it")
+ }
+ if !noPlaintext {
+ if len(d.Plaintext) == 0 {
+ return fmt.Errorf("did not get plaintext when we expected it")
+ }
+ dataKeyInfo["plaintext"] = d.Plaintext
+ plainBytes, err := base64.StdEncoding.DecodeString(d.Plaintext)
+ if err != nil {
+ return fmt.Errorf("could not base64 decode plaintext string '%s'", d.Plaintext)
+ }
+ if len(plainBytes)*8 != bits {
+ return fmt.Errorf("returned key does not have correct bit length")
+ }
+ }
+ dataKeyInfo["ciphertext"] = d.Ciphertext
+ return nil
+ },
+ }
+}
+
+func testAccStepDecryptDatakey(t *testing.T, name string,
+ dataKeyInfo map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/" + name,
+ Data: dataKeyInfo,
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Plaintext string `mapstructure:"plaintext"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ if d.Plaintext != dataKeyInfo["plaintext"].(string) {
+ return fmt.Errorf("plaintext mismatch: got '%s', expected '%s', decryptData was %#v", d.Plaintext, dataKeyInfo["plaintext"].(string), resp.Data)
+ }
+ return nil
+ },
+ }
+}
+
+func TestKeyUpgrade(t *testing.T) {
+ key, _ := uuid.GenerateRandomBytes(32)
+ p := &keysutil.Policy{
+ Name: "test",
+ Key: key,
+ Type: keysutil.KeyType_AES256_GCM96,
+ }
+
+ p.MigrateKeyToKeysMap()
+
+ if p.Key != nil ||
+ p.Keys == nil ||
+ len(p.Keys) != 1 ||
+ !reflect.DeepEqual(p.Keys[1].AESKey, key) {
+ t.Errorf("bad key migration, result is %#v", p.Keys)
+ }
+}
+
+func TestDerivedKeyUpgrade(t *testing.T) {
+ storage := &logical.InmemStorage{}
+ key, _ := uuid.GenerateRandomBytes(32)
+ context, _ := uuid.GenerateRandomBytes(32)
+
+ p := &keysutil.Policy{
+ Name: "test",
+ Key: key,
+ Type: keysutil.KeyType_AES256_GCM96,
+ Derived: true,
+ }
+
+ p.MigrateKeyToKeysMap()
+ p.Upgrade(storage) // Need to run the upgrade code to make the migration stick
+
+ if p.KDF != keysutil.Kdf_hmac_sha256_counter {
+ t.Fatalf("bad KDF value by default; counter val is %d, KDF val is %d, policy is %#v", keysutil.Kdf_hmac_sha256_counter, p.KDF, *p)
+ }
+
+ derBytesOld, err := p.DeriveKey(context, 1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ derBytesOld2, err := p.DeriveKey(context, 1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(derBytesOld, derBytesOld2) {
+ t.Fatal("mismatch of same context alg")
+ }
+
+ p.KDF = keysutil.Kdf_hkdf_sha256
+ if p.NeedsUpgrade() {
+ t.Fatal("expected no upgrade needed")
+ }
+
+ derBytesNew, err := p.DeriveKey(context, 1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ derBytesNew2, err := p.DeriveKey(context, 1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(derBytesNew, derBytesNew2) {
+ t.Fatal("mismatch of same context alg")
+ }
+
+ if reflect.DeepEqual(derBytesOld, derBytesNew) {
+ t.Fatal("match of different context alg")
+ }
+}
+
+func TestConvergentEncryption(t *testing.T) {
+ testConvergentEncryptionCommon(t, 0)
+ testConvergentEncryptionCommon(t, 2)
+}
+
+func testConvergentEncryptionCommon(t *testing.T, ver int) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/testkeynonderived",
+ Data: map[string]interface{}{
+ "derived": false,
+ "convergent_encryption": true,
+ },
+ }
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if !resp.IsError() {
+ t.Fatalf("bad: expected error response, got %#v", *resp)
+ }
+
+ p := &keysutil.Policy{
+ Name: "testkey",
+ Type: keysutil.KeyType_AES256_GCM96,
+ Derived: true,
+ ConvergentEncryption: true,
+ ConvergentVersion: ver,
+ }
+
+ err = p.Rotate(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // First, test using an invalid length of nonce -- this is only used for v1 convergent
+ req.Path = "encrypt/testkey"
+ if ver < 2 {
+ req.Data = map[string]interface{}{
+ "plaintext": "emlwIHphcA==", // "zip zap"
+ "nonce": "Zm9vIGJhcg==", // "foo bar"
+ "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
+ }
+ resp, err = b.HandleRequest(req)
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if !resp.IsError() {
+ t.Fatalf("expected error response, got %#v", *resp)
+ }
+
+ // Ensure we fail if we do not provide a nonce
+ req.Data = map[string]interface{}{
+ "plaintext": "emlwIHphcA==", // "zip zap"
+ "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
+ }
+ resp, err = b.HandleRequest(req)
+ if err == nil && (resp == nil || !resp.IsError()) {
+ t.Fatal("expected error response")
+ }
+ }
+
+ // Now test encrypting the same value twice
+ req.Data = map[string]interface{}{
+ "plaintext": "emlwIHphcA==", // "zip zap"
+ "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee"
+ "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
+ }
+ resp, err = b.HandleRequest(req)
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.IsError() {
+ t.Fatalf("got error response: %#v", *resp)
+ }
+ ciphertext1 := resp.Data["ciphertext"].(string)
+
+ resp, err = b.HandleRequest(req)
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.IsError() {
+ t.Fatalf("got error response: %#v", *resp)
+ }
+ ciphertext2 := resp.Data["ciphertext"].(string)
+
+ if ciphertext1 != ciphertext2 {
+ t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext1, ciphertext2)
+ }
+
+ // For sanity, also check a different nonce value...
+ req.Data = map[string]interface{}{
+ "plaintext": "emlwIHphcA==", // "zip zap"
+ "nonce": "dHdvdGhyZWVmb3Vy", // "twothreefour"
+ "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
+ }
+ if ver < 2 {
+ req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour"
+ } else {
+ req.Data["context"] = "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOldandSdd7S"
+ }
+
+ resp, err = b.HandleRequest(req)
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.IsError() {
+ t.Fatalf("got error response: %#v", *resp)
+ }
+ ciphertext3 := resp.Data["ciphertext"].(string)
+
+ resp, err = b.HandleRequest(req)
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.IsError() {
+ t.Fatalf("got error response: %#v", *resp)
+ }
+ ciphertext4 := resp.Data["ciphertext"].(string)
+
+ if ciphertext3 != ciphertext4 {
+ t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext3, ciphertext4)
+ }
+ if ciphertext1 == ciphertext3 {
+ t.Fatalf("expected different ciphertexts")
+ }
+
+ // ...and a different context value
+ req.Data = map[string]interface{}{
+ "plaintext": "emlwIHphcA==", // "zip zap"
+ "nonce": "dHdvdGhyZWVmb3Vy", // "twothreefour"
+ "context": "qV4h9iQyvn+raODOer4JNAsOhkXBwdT4HZ677Ql4KLqXSU+Jk4C/fXBWbv6xkSYT",
+ }
+ resp, err = b.HandleRequest(req)
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.IsError() {
+ t.Fatalf("got error response: %#v", *resp)
+ }
+ ciphertext5 := resp.Data["ciphertext"].(string)
+
+ resp, err = b.HandleRequest(req)
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.IsError() {
+ t.Fatalf("got error response: %#v", *resp)
+ }
+ ciphertext6 := resp.Data["ciphertext"].(string)
+
+ if ciphertext5 != ciphertext6 {
+ t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext5, ciphertext6)
+ }
+ if ciphertext1 == ciphertext5 {
+ t.Fatalf("expected different ciphertexts")
+ }
+ if ciphertext3 == ciphertext5 {
+ t.Fatalf("expected different ciphertexts")
+ }
+
+ // Finally, check operations on empty values
+ // First, check without setting a plaintext at all
+ req.Data = map[string]interface{}{
+ "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee"
+ "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
+ }
+ resp, err = b.HandleRequest(req)
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if !resp.IsError() {
+ t.Fatalf("expected error response, got: %#v", *resp)
+ }
+
+ // Now set plaintext to empty
+ req.Data = map[string]interface{}{
+ "plaintext": "",
+ "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee"
+ "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
+ }
+ resp, err = b.HandleRequest(req)
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.IsError() {
+ t.Fatalf("got error response: %#v", *resp)
+ }
+ ciphertext7 := resp.Data["ciphertext"].(string)
+
+ resp, err = b.HandleRequest(req)
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.IsError() {
+ t.Fatalf("got error response: %#v", *resp)
+ }
+ ciphertext8 := resp.Data["ciphertext"].(string)
+
+ if ciphertext7 != ciphertext8 {
+ t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext7, ciphertext8)
+ }
+}
+
+func TestPolicyFuzzing(t *testing.T) {
+ var be *backend
+ sysView := logical.TestSystemView()
+
+ be = Backend(&logical.BackendConfig{
+ System: sysView,
+ })
+ testPolicyFuzzingCommon(t, be)
+
+ sysView.CachingDisabledVal = true
+ be = Backend(&logical.BackendConfig{
+ System: sysView,
+ })
+ testPolicyFuzzingCommon(t, be)
+}
+
+func testPolicyFuzzingCommon(t *testing.T, be *backend) {
+ storage := &logical.InmemStorage{}
+ wg := sync.WaitGroup{}
+
+ funcs := []string{"encrypt", "decrypt", "rotate", "change_min_version"}
+ //keys := []string{"test1", "test2", "test3", "test4", "test5"}
+ keys := []string{"test1", "test2", "test3"}
+
+ // This is the goroutine loop
+ doFuzzy := func(id int) {
+ // Check for panics, otherwise notify we're done
+ defer func() {
+ if err := recover(); err != nil {
+ t.Fatalf("got a panic: %v", err)
+ }
+ wg.Done()
+ }()
+
+ // Holds the latest encrypted value for each key
+ latestEncryptedText := map[string]string{}
+
+ startTime := time.Now()
+ req := &logical.Request{
+ Storage: storage,
+ Data: map[string]interface{}{},
+ }
+ fd := &framework.FieldData{}
+
+ var chosenFunc, chosenKey string
+
+ //t.Errorf("Starting %d", id)
+ for {
+ // Stop after 10 seconds
+ if time.Since(startTime) > 10*time.Second {
+ return
+ }
+
+ // Pick a function and a key
+ chosenFunc = funcs[rand.Int()%len(funcs)]
+ chosenKey = keys[rand.Int()%len(keys)]
+
+ fd.Raw = map[string]interface{}{
+ "name": chosenKey,
+ }
+ fd.Schema = be.pathKeys().Fields
+
+ // Try to write the key to make sure it exists
+ _, err := be.pathPolicyWrite(req, fd)
+ if err != nil {
+ t.Fatalf("got an error: %v", err)
+ }
+
+ switch chosenFunc {
+ // Encrypt our plaintext and store the result
+ case "encrypt":
+ //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id)
+ fd.Raw["plaintext"] = base64.StdEncoding.EncodeToString([]byte(testPlaintext))
+ fd.Schema = be.pathEncrypt().Fields
+ resp, err := be.pathEncryptWrite(req, fd)
+ if err != nil {
+ t.Fatalf("got an error: %v, resp is %#v", err, *resp)
+ }
+ latestEncryptedText[chosenKey] = resp.Data["ciphertext"].(string)
+
+ // Rotate to a new key version
+ case "rotate":
+ //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id)
+ fd.Schema = be.pathRotate().Fields
+ resp, err := be.pathRotateWrite(req, fd)
+ if err != nil {
+ t.Fatalf("got an error: %v, resp is %#v, chosenKey is %s", err, *resp, chosenKey)
+ }
+
+ // Decrypt the ciphertext and compare the result
+ case "decrypt":
+ //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id)
+ ct := latestEncryptedText[chosenKey]
+ if ct == "" {
+ continue
+ }
+
+ fd.Raw["ciphertext"] = ct
+ fd.Schema = be.pathDecrypt().Fields
+ resp, err := be.pathDecryptWrite(req, fd)
+ if err != nil {
+ // This could well happen since the min version is jumping around;
+ // guard against a nil response before inspecting the error field
+ if resp != nil {
+ if errStr, ok := resp.Data["error"].(string); ok && errStr == keysutil.ErrTooOld {
+ continue
+ }
+ }
+ t.Fatalf("got an error: %v, resp is %#v, ciphertext was %s, chosenKey is %s, id is %d", err, resp, ct, chosenKey, id)
+ }
+ ptb64 := resp.Data["plaintext"].(string)
+ pt, err := base64.StdEncoding.DecodeString(ptb64)
+ if err != nil {
+ t.Fatalf("got an error decoding base64 plaintext: %v", err)
+ return
+ }
+ if string(pt) != testPlaintext {
+ t.Fatalf("got bad plaintext back: %s", pt)
+ }
+
+ // Change the min version, which also tests the archive functionality
+ case "change_min_version":
+ //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id)
+ resp, err := be.pathPolicyRead(req, fd)
+ if err != nil {
+ t.Fatalf("got an error reading policy %s: %v", chosenKey, err)
+ }
+ latestVersion := resp.Data["latest_version"].(int)
+
+ // keys start at version 1 so we want [1, latestVersion] not [0, latestVersion)
+ setVersion := (rand.Int() % latestVersion) + 1
+ fd.Raw["min_decryption_version"] = setVersion
+ fd.Schema = be.pathConfig().Fields
+ resp, err = be.pathConfigWrite(req, fd)
+ if err != nil {
+ t.Fatalf("got an error setting min decryption version: %v", err)
+ }
+ }
+ }
+ }
+
+ // Spawn 1000 of these workers for 10 seconds
+ for i := 0; i < 1000; i++ {
+ wg.Add(1)
+ go doFuzzy(i)
+ }
+
+ // Wait for them all to finish
+ wg.Wait()
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go
new file mode 100644
index 0000000..d2b3e5f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go
@@ -0,0 +1,120 @@
+package transit
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func (b *backend) pathConfig() *framework.Path {
+ return &framework.Path{
+ Pattern: "keys/" + framework.GenericNameRegex("name") + "/config",
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the key",
+ },
+
+ "min_decryption_version": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `If set, the minimum version of the key allowed
+to be decrypted.`,
+ },
+
+ "deletion_allowed": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: "Whether to allow deletion of the key",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathConfigWrite,
+ },
+
+ HelpSynopsis: pathConfigHelpSyn,
+ HelpDescription: pathConfigHelpDesc,
+ }
+}
+
+func (b *backend) pathConfigWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+
+ // Check if the policy already exists before we lock everything
+ p, lock, err := b.lm.GetPolicyExclusive(req.Storage, name)
+ if lock != nil {
+ defer lock.Unlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse(
+ fmt.Sprintf("no existing key named %s could be found", name)),
+ logical.ErrInvalidRequest
+ }
+
+ resp := &logical.Response{}
+
+ persistNeeded := false
+
+ minDecryptionVersionRaw, ok := d.GetOk("min_decryption_version")
+ if ok {
+ minDecryptionVersion := minDecryptionVersionRaw.(int)
+
+ if minDecryptionVersion < 0 {
+ return logical.ErrorResponse("min decryption version cannot be negative"), nil
+ }
+
+ if minDecryptionVersion == 0 {
+ minDecryptionVersion = 1
+ resp.AddWarning("since Vault 0.3, transit key numbering starts at 1; forcing minimum to 1")
+ }
+
+ if minDecryptionVersion > 0 &&
+ minDecryptionVersion != p.MinDecryptionVersion {
+ if minDecryptionVersion > p.LatestVersion {
+ return logical.ErrorResponse(
+ fmt.Sprintf("cannot set min decryption version of %d, latest key version is %d", minDecryptionVersion, p.LatestVersion)), nil
+ }
+ p.MinDecryptionVersion = minDecryptionVersion
+ persistNeeded = true
+ }
+ }
+
+ allowDeletionInt, ok := d.GetOk("deletion_allowed")
+ if ok {
+ allowDeletion := allowDeletionInt.(bool)
+ if allowDeletion != p.DeletionAllowed {
+ p.DeletionAllowed = allowDeletion
+ persistNeeded = true
+ }
+ }
+
+ // Add this as a guard here before persisting since we now require the min
+ // decryption version to start at 1; even if it's not explicitly set here,
+ // force the upgrade
+ if p.MinDecryptionVersion == 0 {
+ p.MinDecryptionVersion = 1
+ persistNeeded = true
+ }
+
+ if !persistNeeded {
+ return nil, nil
+ }
+
+ if len(resp.Warnings()) == 0 {
+ return nil, p.Persist(req.Storage)
+ }
+
+ return resp, p.Persist(req.Storage)
+}
+
+const pathConfigHelpSyn = `Configure a named encryption key`
+
+const pathConfigHelpDesc = `
+This path is used to configure the named key. Currently, this
+supports adjusting the minimum version of the key allowed to
+be used for decryption via the min_decryption_version parameter.
+`
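+
+// Illustrative only: a minimal sketch of calling this endpoint, assuming the
+// backend is mounted at "transit/" and a key named "my-key" exists (both are
+// assumptions for the example, not fixed by this file):
+//
+//   $ vault write transit/keys/my-key/config min_decryption_version=2
+//   $ vault write transit/keys/my-key/config deletion_allowed=true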
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go
new file mode 100644
index 0000000..36c6aea
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go
@@ -0,0 +1,159 @@
+package transit
+
+import (
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func (b *backend) pathDatakey() *framework.Path {
+ return &framework.Path{
+ Pattern: "datakey/" + framework.GenericNameRegex("plaintext") + "/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The backend key used for encrypting the data key",
+ },
+
+ "plaintext": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `"plaintext" will return the key in both plaintext and
+ciphertext; "wrapped" will return the ciphertext only.`,
+ },
+
+ "context": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Context for key derivation. Required for derived keys.",
+ },
+
+ "nonce": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Nonce for when convergent encryption is used",
+ },
+
+ "bits": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `Number of bits for the key; currently 128, 256,
+and 512 bits are supported. Defaults to 256.`,
+ Default: 256,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathDatakeyWrite,
+ },
+
+ HelpSynopsis: pathDatakeyHelpSyn,
+ HelpDescription: pathDatakeyHelpDesc,
+ }
+}
+
+func (b *backend) pathDatakeyWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+
+ plaintext := d.Get("plaintext").(string)
+ plaintextAllowed := false
+ switch plaintext {
+ case "plaintext":
+ plaintextAllowed = true
+ case "wrapped":
+ default:
+ return logical.ErrorResponse("Invalid path, must be 'plaintext' or 'wrapped'"), logical.ErrInvalidRequest
+ }
+
+ var err error
+
+ // Decode the context if any
+ contextRaw := d.Get("context").(string)
+ var context []byte
+ if len(contextRaw) != 0 {
+ context, err = base64.StdEncoding.DecodeString(contextRaw)
+ if err != nil {
+ return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest
+ }
+ }
+
+ // Decode the nonce if any
+ nonceRaw := d.Get("nonce").(string)
+ var nonce []byte
+ if len(nonceRaw) != 0 {
+ nonce, err = base64.StdEncoding.DecodeString(nonceRaw)
+ if err != nil {
+ return logical.ErrorResponse("failed to base64-decode nonce"), logical.ErrInvalidRequest
+ }
+ }
+
+ // Get the policy
+ p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ }
+
+ newKey := make([]byte, 32)
+ bits := d.Get("bits").(int)
+ switch bits {
+ case 512:
+ newKey = make([]byte, 64)
+ case 256:
+ case 128:
+ newKey = make([]byte, 16)
+ default:
+ return logical.ErrorResponse("invalid bit length"), logical.ErrInvalidRequest
+ }
+ _, err = rand.Read(newKey)
+ if err != nil {
+ return nil, err
+ }
+
+ ciphertext, err := p.Encrypt(context, nonce, base64.StdEncoding.EncodeToString(newKey))
+ if err != nil {
+ switch err.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ case errutil.InternalError:
+ return nil, err
+ default:
+ return nil, err
+ }
+ }
+
+ if ciphertext == "" {
+ return nil, fmt.Errorf("empty ciphertext returned")
+ }
+
+ // Generate the response
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "ciphertext": ciphertext,
+ },
+ }
+
+ if plaintextAllowed {
+ resp.Data["plaintext"] = base64.StdEncoding.EncodeToString(newKey)
+ }
+
+ return resp, nil
+}
+
+const pathDatakeyHelpSyn = `Generate a data key`
+
+const pathDatakeyHelpDesc = `
+This path can be used to generate a data key: a random
+key of a certain length that can be used for encryption
+and decryption, protected by the named backend key. 128, 256,
+or 512 bits can be specified; if not specified, the default
+is 256 bits. Call with the "wrapped" path to prevent the
+(base64-encoded) plaintext key from being returned along with
+the encrypted key; the "plaintext" path returns both.
+`
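+
+// Illustrative only: a hedged sketch of generating a data key against a
+// hypothetical key named "my-key" on a "transit/" mount:
+//
+//   $ vault write -f transit/datakey/plaintext/my-key bits=256  # plaintext + ciphertext
+//   $ vault write -f transit/datakey/wrapped/my-key             # ciphertext only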
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go
new file mode 100644
index 0000000..c66931d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go
@@ -0,0 +1,165 @@
+package transit
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/mitchellh/mapstructure"
+)
+
+func (b *backend) pathDecrypt() *framework.Path {
+ return &framework.Path{
+ Pattern: "decrypt/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the policy",
+ },
+
+ "ciphertext": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+The ciphertext to decrypt, provided as returned by encrypt.`,
+ },
+
+ "context": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+Base64 encoded context for key derivation. Required if key derivation is
+enabled.`,
+ },
+
+ "nonce": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+Base64 encoded nonce value used during encryption. Must be provided if
+convergent encryption is enabled for this key and the key was generated with
+Vault 0.6.1. Not required for keys created in 0.6.2+.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathDecryptWrite,
+ },
+
+ HelpSynopsis: pathDecryptHelpSyn,
+ HelpDescription: pathDecryptHelpDesc,
+ }
+}
+
+func (b *backend) pathDecryptWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ batchInputRaw := d.Raw["batch_input"]
+ var batchInputItems []BatchRequestItem
+ var err error
+ if batchInputRaw != nil {
+ err = mapstructure.Decode(batchInputRaw, &batchInputItems)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse batch input: %v", err)
+ }
+
+ if len(batchInputItems) == 0 {
+ return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest
+ }
+ } else {
+ ciphertext := d.Get("ciphertext").(string)
+ if len(ciphertext) == 0 {
+ return logical.ErrorResponse("missing ciphertext to decrypt"), logical.ErrInvalidRequest
+ }
+
+ batchInputItems = make([]BatchRequestItem, 1)
+ batchInputItems[0] = BatchRequestItem{
+ Ciphertext: ciphertext,
+ Context: d.Get("context").(string),
+ Nonce: d.Get("nonce").(string),
+ }
+ }
+
+ batchResponseItems := make([]BatchResponseItem, len(batchInputItems))
+ contextSet := len(batchInputItems[0].Context) != 0
+
+ for i, item := range batchInputItems {
+ if (len(item.Context) == 0 && contextSet) || (len(item.Context) != 0 && !contextSet) {
+ return logical.ErrorResponse("context should be set either in all the request blocks or in none"), logical.ErrInvalidRequest
+ }
+
+ if item.Ciphertext == "" {
+ batchResponseItems[i].Error = "missing ciphertext to decrypt"
+ continue
+ }
+
+ // Decode the context
+ if len(item.Context) != 0 {
+ batchInputItems[i].DecodedContext, err = base64.StdEncoding.DecodeString(item.Context)
+ if err != nil {
+ batchResponseItems[i].Error = err.Error()
+ continue
+ }
+ }
+
+ // Decode the nonce
+ if len(item.Nonce) != 0 {
+ batchInputItems[i].DecodedNonce, err = base64.StdEncoding.DecodeString(item.Nonce)
+ if err != nil {
+ batchResponseItems[i].Error = err.Error()
+ continue
+ }
+ }
+ }
+
+ // Get the policy
+ p, lock, err := b.lm.GetPolicyShared(req.Storage, d.Get("name").(string))
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ }
+
+ for i, item := range batchInputItems {
+ if batchResponseItems[i].Error != "" {
+ continue
+ }
+
+ plaintext, err := p.Decrypt(item.DecodedContext, item.DecodedNonce, item.Ciphertext)
+ if err != nil {
+ switch err.(type) {
+ case errutil.UserError:
+ batchResponseItems[i].Error = err.Error()
+ continue
+ default:
+ return nil, err
+ }
+ }
+ batchResponseItems[i].Plaintext = plaintext
+ }
+
+ resp := &logical.Response{}
+ if batchInputRaw != nil {
+ resp.Data = map[string]interface{}{
+ "batch_results": batchResponseItems,
+ }
+ } else {
+ if batchResponseItems[0].Error != "" {
+ return logical.ErrorResponse(batchResponseItems[0].Error), logical.ErrInvalidRequest
+ }
+ resp.Data = map[string]interface{}{
+ "plaintext": batchResponseItems[0].Plaintext,
+ }
+ }
+
+ return resp, nil
+}
+
+const pathDecryptHelpSyn = `Decrypt a ciphertext value using a named key`
+
+const pathDecryptHelpDesc = `
+This path uses the named key from the request path to decrypt a
+user-provided ciphertext. The plaintext is returned base64-encoded.
+`
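+
+// Illustrative only: a sketch of a decrypt call, assuming a "transit/" mount
+// and a ciphertext previously returned by the encrypt endpoint; the payload
+// after "vault:v1:" is elided here as a placeholder:
+//
+//   $ vault write transit/decrypt/my-key ciphertext="vault:v1:..."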
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt_test.go
new file mode 100644
index 0000000..e0d4b6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt_test.go
@@ -0,0 +1,181 @@
+package transit
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+// Case1: If the batch decryption input cannot be parsed into request items, it should fail.
+func TestTransit_BatchDecryptionCase1(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ batchEncryptionInput := []interface{}{
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
+ map[string]interface{}{"plaintext": "Cg=="},
+ }
+
+ batchEncryptionData := map[string]interface{}{
+ "batch_input": batchEncryptionInput,
+ }
+
+ batchEncryptionReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: batchEncryptionData,
+ }
+ resp, err = b.HandleRequest(batchEncryptionReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchDecryptionData := map[string]interface{}{
+ "batch_input": resp.Data["batch_results"],
+ }
+
+ batchDecryptionReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/upserted_key",
+ Storage: s,
+ Data: batchDecryptionData,
+ }
+ resp, err = b.HandleRequest(batchDecryptionReq)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+}
+
+// Case2: Normal case of batch decryption
+func TestTransit_BatchDecryptionCase2(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ batchEncryptionInput := []interface{}{
+ map[string]interface{}{"plaintext": "Cg=="},
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
+ }
+ batchEncryptionData := map[string]interface{}{
+ "batch_input": batchEncryptionInput,
+ }
+
+ batchEncryptionReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: batchEncryptionData,
+ }
+ resp, err = b.HandleRequest(batchEncryptionReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
+ batchDecryptionInput := make([]interface{}, len(batchResponseItems))
+ for i, item := range batchResponseItems {
+ batchDecryptionInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext}
+ }
+ batchDecryptionData := map[string]interface{}{
+ "batch_input": batchDecryptionInput,
+ }
+
+ batchDecryptionReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/upserted_key",
+ Storage: s,
+ Data: batchDecryptionData,
+ }
+ resp, err = b.HandleRequest(batchDecryptionReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchDecryptionResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
+
+ plaintext1 := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+ plaintext2 := "Cg=="
+ for _, item := range batchDecryptionResponseItems {
+ if item.Plaintext != plaintext1 && item.Plaintext != plaintext2 {
+ t.Fatalf("bad: plaintext: %q", item.Plaintext)
+ }
+ }
+}
+
+// Case3: Test batch decryption with a derived key
+func TestTransit_BatchDecryptionCase3(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ policyData := map[string]interface{}{
+ "derived": true,
+ }
+
+ policyReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "keys/existing_key",
+ Storage: s,
+ Data: policyData,
+ }
+
+ resp, err = b.HandleRequest(policyReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchInput := []interface{}{
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dGVzdGNvbnRleHQ="},
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dGVzdGNvbnRleHQ="},
+ }
+
+ batchData := map[string]interface{}{
+ "batch_input": batchInput,
+ }
+ batchReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "encrypt/existing_key",
+ Storage: s,
+ Data: batchData,
+ }
+ resp, err = b.HandleRequest(batchReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchDecryptionInputItems := resp.Data["batch_results"].([]BatchResponseItem)
+
+ batchDecryptionInput := make([]interface{}, len(batchDecryptionInputItems))
+ for i, item := range batchDecryptionInputItems {
+ batchDecryptionInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext, "context": "dGVzdGNvbnRleHQ="}
+ }
+
+ batchDecryptionData := map[string]interface{}{
+ "batch_input": batchDecryptionInput,
+ }
+
+ batchDecryptionReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/existing_key",
+ Storage: s,
+ Data: batchDecryptionData,
+ }
+ resp, err = b.HandleRequest(batchDecryptionReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchDecryptionResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
+
+ plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+ for _, item := range batchDecryptionResponseItems {
+ if item.Plaintext != plaintext {
+ t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, item.Plaintext)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go
new file mode 100644
index 0000000..b4281d6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go
@@ -0,0 +1,291 @@
+package transit
+
+import (
+ "encoding/base64"
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/helper/keysutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/mitchellh/mapstructure"
+)
+
+// BatchRequestItem represents a request item for batch processing
+type BatchRequestItem struct {
+ // Context for key derivation. This is required for derived keys.
+ Context string `json:"context" structs:"context" mapstructure:"context"`
+
+ // DecodedContext is the base64 decoded version of Context
+ DecodedContext []byte
+
+ // Plaintext for encryption
+ Plaintext string `json:"plaintext" structs:"plaintext" mapstructure:"plaintext"`
+
+ // Ciphertext for decryption
+ Ciphertext string `json:"ciphertext" structs:"ciphertext" mapstructure:"ciphertext"`
+
+ // Nonce to be used when v1 convergent encryption is used
+ Nonce string `json:"nonce" structs:"nonce" mapstructure:"nonce"`
+
+ // DecodedNonce is the base64 decoded version of Nonce
+ DecodedNonce []byte
+}
+
+// BatchResponseItem represents a response item for batch processing
+type BatchResponseItem struct {
+ // Ciphertext for the plaintext present in the corresponding batch
+ // request item
+ Ciphertext string `json:"ciphertext,omitempty" structs:"ciphertext" mapstructure:"ciphertext"`
+
+ // Plaintext for the ciphertext present in the corresponding batch
+ // request item
+ Plaintext string `json:"plaintext,omitempty" structs:"plaintext" mapstructure:"plaintext"`
+
+ // Error, if set, represents a failure encountered while encrypting a
+ // corresponding batch request item
+ Error string `json:"error,omitempty" structs:"error" mapstructure:"error"`
+}
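+
+// Illustrative only: the wire shape these structs decode from and encode to,
+// shown as a hypothetical request/response pair with placeholder values:
+//
+//   "batch_input":   [{"plaintext": "Cg=="}, {"plaintext": "dGVzdA=="}]
+//   "batch_results": [{"ciphertext": "vault:v1:..."}, {"ciphertext": "vault:v1:..."}]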
+
+func (b *backend) pathEncrypt() *framework.Path {
+ return &framework.Path{
+ Pattern: "encrypt/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the policy",
+ },
+
+ "plaintext": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Base64 encoded plaintext value to be encrypted",
+ },
+
+ "context": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Base64 encoded context for key derivation. Required if key derivation is enabled",
+ },
+
+ "nonce": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+Base64 encoded nonce value. Must be provided if convergent encryption is
+enabled for this key and the key was generated with Vault 0.6.1. Not required
+for keys created in 0.6.2+. The value must be exactly 96 bits (12 bytes) long
+and the user must ensure that for any given context (and thus, any given
+encryption key) this nonce value is **never reused**.
+`,
+ },
+
+ "type": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "aes256-gcm96",
+ Description: `
+This parameter is required when the encryption key is expected to be created.
+When performing an upsert operation, the type of key to create. Currently,
+"aes256-gcm96" (symmetric) is the only type supported. Defaults to
+"aes256-gcm96".`,
+ },
+
+ "convergent_encryption": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `
+This parameter will only be used when a key is expected to be created. Whether
+to support convergent encryption. This is only supported when using a key with
+key derivation enabled and will require all requests to carry both a context
+and 96-bit (12-byte) nonce. The given nonce will be used in place of a randomly
+generated nonce. As a result, when the same context and nonce are supplied, the
+same ciphertext is generated. It is *very important* when using this mode that
+you ensure that all nonces are unique for a given context. Failing to do so
+will severely impact the ciphertext's security.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.CreateOperation: b.pathEncryptWrite,
+ logical.UpdateOperation: b.pathEncryptWrite,
+ },
+
+ ExistenceCheck: b.pathEncryptExistenceCheck,
+
+ HelpSynopsis: pathEncryptHelpSyn,
+ HelpDescription: pathEncryptHelpDesc,
+ }
+}
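+
+// Illustrative only: a hedged sketch of upserting a convergent key via this
+// endpoint on a "transit/" mount; the context value is a placeholder and,
+// for keys created with Vault 0.6.1, a 96-bit base64 nonce would also be
+// required:
+//
+//   $ vault write transit/encrypt/conv-key \
+//       plaintext="dGVzdA==" context="dGVzdGNvbnRleHQ=" \
+//       convergent_encryption=true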
+
+func (b *backend) pathEncryptExistenceCheck(
+ req *logical.Request, d *framework.FieldData) (bool, error) {
+ name := d.Get("name").(string)
+ p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ return false, err
+ }
+ return p != nil, nil
+}
+
+func (b *backend) pathEncryptWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ var err error
+
+ batchInputRaw := d.Raw["batch_input"]
+ var batchInputItems []BatchRequestItem
+ if batchInputRaw != nil {
+ err = mapstructure.Decode(batchInputRaw, &batchInputItems)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse batch input: %v", err)
+ }
+
+ if len(batchInputItems) == 0 {
+ return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest
+ }
+ } else {
+ valueRaw, ok := d.GetOk("plaintext")
+ if !ok {
+ return logical.ErrorResponse("missing plaintext to encrypt"), logical.ErrInvalidRequest
+ }
+
+ batchInputItems = make([]BatchRequestItem, 1)
+ batchInputItems[0] = BatchRequestItem{
+ Plaintext: valueRaw.(string),
+ Context: d.Get("context").(string),
+ Nonce: d.Get("nonce").(string),
+ }
+ }
+
+ batchResponseItems := make([]BatchResponseItem, len(batchInputItems))
+ contextSet := len(batchInputItems[0].Context) != 0
+
+ // Before processing the batch request items, get the policy. If the
+ // policy is supposed to be upserted, then determine if 'derived' is to
+ // be set or not, based on the presence of 'context' field in all the
+ // input items.
+ for i, item := range batchInputItems {
+ if (len(item.Context) == 0 && contextSet) || (len(item.Context) != 0 && !contextSet) {
+ return logical.ErrorResponse("context should be set either in all the request blocks or in none"), logical.ErrInvalidRequest
+ }
+
+ _, err := base64.StdEncoding.DecodeString(item.Plaintext)
+ if err != nil {
+ batchResponseItems[i].Error = "failed to base64-decode plaintext"
+ continue
+ }
+
+ // Decode the context
+ if len(item.Context) != 0 {
+ batchInputItems[i].DecodedContext, err = base64.StdEncoding.DecodeString(item.Context)
+ if err != nil {
+ batchResponseItems[i].Error = err.Error()
+ continue
+ }
+ }
+
+ // Decode the nonce
+ if len(item.Nonce) != 0 {
+ batchInputItems[i].DecodedNonce, err = base64.StdEncoding.DecodeString(item.Nonce)
+ if err != nil {
+ batchResponseItems[i].Error = err.Error()
+ continue
+ }
+ }
+ }
+
+ // Get the policy
+ var p *keysutil.Policy
+ var lock *sync.RWMutex
+ var upserted bool
+ if req.Operation == logical.CreateOperation {
+ convergent := d.Get("convergent_encryption").(bool)
+ if convergent && !contextSet {
+ return logical.ErrorResponse("convergent encryption requires derivation to be enabled, so context is required"), nil
+ }
+
+ polReq := keysutil.PolicyRequest{
+ Storage: req.Storage,
+ Name: name,
+ Derived: contextSet,
+ Convergent: convergent,
+ }
+
+ keyType := d.Get("type").(string)
+ switch keyType {
+ case "aes256-gcm96":
+ polReq.KeyType = keysutil.KeyType_AES256_GCM96
+ case "ecdsa-p256":
+ return logical.ErrorResponse(fmt.Sprintf("key type %v not supported for this operation", keyType)), logical.ErrInvalidRequest
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest
+ }
+
+ p, lock, upserted, err = b.lm.GetPolicyUpsert(polReq)
+
+ } else {
+ p, lock, err = b.lm.GetPolicyShared(req.Storage, name)
+ }
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ }
+
+ // Process batch request items. If encryption of any request
+ // item fails, respectively mark the error in the response
+ // collection and continue to process other items.
+ for i, item := range batchInputItems {
+ if batchResponseItems[i].Error != "" {
+ continue
+ }
+
+ ciphertext, err := p.Encrypt(item.DecodedContext, item.DecodedNonce, item.Plaintext)
+ if err != nil {
+ switch err.(type) {
+ case errutil.UserError:
+ batchResponseItems[i].Error = err.Error()
+ continue
+ default:
+ return nil, err
+ }
+ }
+
+ if ciphertext == "" {
+ return nil, fmt.Errorf("empty ciphertext returned for input item %d", i)
+ }
+
+ batchResponseItems[i].Ciphertext = ciphertext
+ }
+
+ resp := &logical.Response{}
+ if batchInputRaw != nil {
+ resp.Data = map[string]interface{}{
+ "batch_results": batchResponseItems,
+ }
+ } else {
+ if batchResponseItems[0].Error != "" {
+ return logical.ErrorResponse(batchResponseItems[0].Error), logical.ErrInvalidRequest
+ }
+ resp.Data = map[string]interface{}{
+ "ciphertext": batchResponseItems[0].Ciphertext,
+ }
+ }
+
+ if req.Operation == logical.CreateOperation && !upserted {
+ resp.AddWarning("Attempted creation of the key during the encrypt operation, but it was created beforehand")
+ }
+ return resp, nil
+}
+
+const pathEncryptHelpSyn = `Encrypt a plaintext value or a batch of plaintext
+blocks using a named key`
+
+const pathEncryptHelpDesc = `
+This path uses the named key from the request path to encrypt a user-provided
+plaintext or a batch of plaintext blocks. The plaintext must be base64-encoded.
+`
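+
+// Illustrative only: minimal encrypt calls with placeholder values, assuming
+// a "transit/" mount; the batch form is shown as the raw HTTP payload:
+//
+//   $ vault write transit/encrypt/my-key plaintext="dGVzdA=="
+//
+//   POST /v1/transit/encrypt/my-key
+//   {"batch_input": [{"plaintext": "Cg=="}, {"plaintext": "dGVzdA=="}]}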
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt_test.go
new file mode 100644
index 0000000..6ab20db
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt_test.go
@@ -0,0 +1,547 @@
+package transit
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/mitchellh/mapstructure"
+)
+
+// Case1: Ensure that batch encryption did not affect the normal flow of
+// encrypting the plaintext with a pre-existing key.
+func TestTransit_BatchEncryptionCase1(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ // Create the policy
+ policyReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "keys/existing_key",
+ Storage: s,
+ }
+ resp, err = b.HandleRequest(policyReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+
+ encData := map[string]interface{}{
+ "plaintext": plaintext,
+ }
+
+ encReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "encrypt/existing_key",
+ Storage: s,
+ Data: encData,
+ }
+ resp, err = b.HandleRequest(encReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ ciphertext := resp.Data["ciphertext"]
+
+ decData := map[string]interface{}{
+ "ciphertext": ciphertext,
+ }
+ decReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/existing_key",
+ Storage: s,
+ Data: decData,
+ }
+ resp, err = b.HandleRequest(decReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["plaintext"] != plaintext {
+ t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"])
+ }
+}
+
+// Case2: Ensure that batch encryption did not affect the normal flow of
+// encrypting the plaintext with the key upserted.
+func TestTransit_BatchEncryptionCase2(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, s := createBackendWithStorage(t)
+
+ // Upsert the key and encrypt the data
+ plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+
+ encData := map[string]interface{}{
+ "plaintext": plaintext,
+ }
+
+ encReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: encData,
+ }
+ resp, err = b.HandleRequest(encReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ ciphertext := resp.Data["ciphertext"]
+ decData := map[string]interface{}{
+ "ciphertext": ciphertext,
+ }
+
+ policyReq := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "keys/upserted_key",
+ Storage: s,
+ }
+
+ resp, err = b.HandleRequest(policyReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ decReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/upserted_key",
+ Storage: s,
+ Data: decData,
+ }
+ resp, err = b.HandleRequest(decReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["plaintext"] != plaintext {
+ t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"])
+ }
+}
+
+// Case3: If the batch encryption input is not a list of request items (here, a raw JSON string), parsing should fail.
+func TestTransit_BatchEncryptionCase3(t *testing.T) {
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ batchInput := `[{"plaintext":"dGhlIHF1aWNrIGJyb3duIGZveA=="}]`
+ batchData := map[string]interface{}{
+ "batch_input": batchInput,
+ }
+
+ batchReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: batchData,
+ }
+ _, err = b.HandleRequest(batchReq)
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+}
+
+// Case4: Test batch encryption with an existing key
+func TestTransit_BatchEncryptionCase4(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ policyReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "keys/existing_key",
+ Storage: s,
+ }
+ resp, err = b.HandleRequest(policyReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchInput := []interface{}{
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
+ }
+
+ batchData := map[string]interface{}{
+ "batch_input": batchInput,
+ }
+ batchReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "encrypt/existing_key",
+ Storage: s,
+ Data: batchData,
+ }
+ resp, err = b.HandleRequest(batchReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
+
+ decReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/existing_key",
+ Storage: s,
+ }
+
+ plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+
+ for _, item := range batchResponseItems {
+ decReq.Data = map[string]interface{}{
+ "ciphertext": item.Ciphertext,
+ }
+ resp, err = b.HandleRequest(decReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["plaintext"] != plaintext {
+ t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"])
+ }
+ }
+}
+
+// Case5: Test batch encryption with an existing derived key
+func TestTransit_BatchEncryptionCase5(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ policyData := map[string]interface{}{
+ "derived": true,
+ }
+
+ policyReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "keys/existing_key",
+ Storage: s,
+ Data: policyData,
+ }
+
+ resp, err = b.HandleRequest(policyReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchInput := []interface{}{
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="},
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="},
+ }
+
+ batchData := map[string]interface{}{
+ "batch_input": batchInput,
+ }
+
+ batchReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "encrypt/existing_key",
+ Storage: s,
+ Data: batchData,
+ }
+ resp, err = b.HandleRequest(batchReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
+
+ decReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/existing_key",
+ Storage: s,
+ }
+
+ plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+
+ for _, item := range batchResponseItems {
+ decReq.Data = map[string]interface{}{
+ "ciphertext": item.Ciphertext,
+ "context": "dmlzaGFsCg==",
+ }
+ resp, err = b.HandleRequest(decReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["plaintext"] != plaintext {
+ t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"])
+ }
+ }
+}
+
+// Case6: Test batch encryption with an upserted non-derived key
+func TestTransit_BatchEncryptionCase6(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ batchInput := []interface{}{
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
+ }
+
+ batchData := map[string]interface{}{
+ "batch_input": batchInput,
+ }
+ batchReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: batchData,
+ }
+ resp, err = b.HandleRequest(batchReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
+
+ decReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/upserted_key",
+ Storage: s,
+ }
+
+ plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+
+ for _, responseItem := range batchResponseItems {
+ var item BatchResponseItem
+ if err := mapstructure.Decode(responseItem, &item); err != nil {
+ t.Fatal(err)
+ }
+ decReq.Data = map[string]interface{}{
+ "ciphertext": item.Ciphertext,
+ }
+ resp, err = b.HandleRequest(decReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["plaintext"] != plaintext {
+ t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"])
+ }
+ }
+}
+
+// Case7: Test batch encryption with an upserted derived key
+func TestTransit_BatchEncryptionCase7(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ batchInput := []interface{}{
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="},
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="},
+ }
+
+ batchData := map[string]interface{}{
+ "batch_input": batchInput,
+ }
+ batchReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: batchData,
+ }
+ resp, err = b.HandleRequest(batchReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
+
+ decReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/upserted_key",
+ Storage: s,
+ }
+
+ plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+
+ for _, item := range batchResponseItems {
+ decReq.Data = map[string]interface{}{
+ "ciphertext": item.Ciphertext,
+ "context": "dmlzaGFsCg==",
+ }
+ resp, err = b.HandleRequest(decReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["plaintext"] != plaintext {
+ t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"])
+ }
+ }
+}
+
+// Case8: If plaintext is not base64 encoded, encryption should fail
+func TestTransit_BatchEncryptionCase8(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ // Create the policy
+ policyReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "keys/existing_key",
+ Storage: s,
+ }
+ resp, err = b.HandleRequest(policyReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchInput := []interface{}{
+ map[string]interface{}{"plaintext": "simple_plaintext"},
+ }
+ batchData := map[string]interface{}{
+ "batch_input": batchInput,
+ }
+ batchReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "encrypt/existing_key",
+ Storage: s,
+ Data: batchData,
+ }
+ resp, err = b.HandleRequest(batchReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ plaintext := "simple plaintext"
+
+ encData := map[string]interface{}{
+ "plaintext": plaintext,
+ }
+
+ encReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "encrypt/existing_key",
+ Storage: s,
+ Data: encData,
+ }
+ resp, err = b.HandleRequest(encReq)
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+}
+
+// Case9: If both plaintext and batch inputs are supplied, plaintext should be
+// ignored.
+func TestTransit_BatchEncryptionCase9(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ batchInput := []interface{}{
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
+ }
+ plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+ batchData := map[string]interface{}{
+ "batch_input": batchInput,
+ "plaintext": plaintext,
+ }
+ batchReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: batchData,
+ }
+ resp, err = b.HandleRequest(batchReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ _, ok := resp.Data["ciphertext"]
+ if ok {
+ t.Fatal("ciphertext field should not be set")
+ }
+}
+
+// Case10: Inconsistent presence of 'context' in batch input should be caught
+func TestTransit_BatchEncryptionCase10(t *testing.T) {
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ batchInput := []interface{}{
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="},
+ }
+
+ batchData := map[string]interface{}{
+ "batch_input": batchInput,
+ }
+
+ batchReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: batchData,
+ }
+ _, err = b.HandleRequest(batchReq)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+}
+
+// Case11: Incorrect inputs for context and nonce should not fail the operation
+func TestTransit_BatchEncryptionCase11(t *testing.T) {
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ batchInput := []interface{}{
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="},
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "not-encoded"},
+ }
+
+ batchData := map[string]interface{}{
+ "batch_input": batchInput,
+ }
+ batchReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: batchData,
+ }
+ _, err = b.HandleRequest(batchReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Case12: Invalid batch input
+func TestTransit_BatchEncryptionCase12(t *testing.T) {
+ var err error
+ b, s := createBackendWithStorage(t)
+
+ batchInput := []interface{}{
+ map[string]interface{}{},
+ "unexpected_interface",
+ }
+
+ batchData := map[string]interface{}{
+ "batch_input": batchInput,
+ }
+ batchReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: batchData,
+ }
+ _, err = b.HandleRequest(batchReq)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go
new file mode 100644
index 0000000..1f7350e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go
@@ -0,0 +1,204 @@
+package transit
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/keysutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const (
+ exportTypeEncryptionKey = "encryption-key"
+ exportTypeSigningKey = "signing-key"
+ exportTypeHMACKey = "hmac-key"
+)
+
+func (b *backend) pathExportKeys() *framework.Path {
+ return &framework.Path{
+ Pattern: "export/" + framework.GenericNameRegex("type") + "/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("version"),
+ Fields: map[string]*framework.FieldSchema{
+ "type": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Type of key to export (encryption-key, signing-key, hmac-key)",
+ },
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the key",
+ },
+ "version": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Version of the key",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathPolicyExportRead,
+ },
+
+ HelpSynopsis: pathExportHelpSyn,
+ HelpDescription: pathExportHelpDesc,
+ }
+}
+
+func (b *backend) pathPolicyExportRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ exportType := d.Get("type").(string)
+ name := d.Get("name").(string)
+ version := d.Get("version").(string)
+
+ switch exportType {
+ case exportTypeEncryptionKey:
+ case exportTypeSigningKey:
+ case exportTypeHMACKey:
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("invalid export type: %s", exportType)), logical.ErrInvalidRequest
+ }
+
+ p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return nil, nil
+ }
+
+ if !p.Exportable {
+ return logical.ErrorResponse("key is not exportable"), nil
+ }
+
+ switch exportType {
+ case exportTypeEncryptionKey:
+ if !p.Type.EncryptionSupported() {
+ return logical.ErrorResponse("encryption not supported for the key"), logical.ErrInvalidRequest
+ }
+ case exportTypeSigningKey:
+ if !p.Type.SigningSupported() {
+ return logical.ErrorResponse("signing not supported for the key"), logical.ErrInvalidRequest
+ }
+ }
+
+ retKeys := map[string]string{}
+ switch version {
+ case "":
+ for k, v := range p.Keys {
+ exportKey, err := getExportKey(p, &v, exportType)
+ if err != nil {
+ return nil, err
+ }
+ retKeys[strconv.Itoa(k)] = exportKey
+ }
+
+ default:
+ var versionValue int
+ if version == "latest" {
+ versionValue = p.LatestVersion
+ } else {
+ version = strings.TrimPrefix(version, "v")
+ versionValue, err = strconv.Atoi(version)
+ if err != nil {
+ return logical.ErrorResponse("invalid key version"), logical.ErrInvalidRequest
+ }
+ }
+
+ if versionValue < p.MinDecryptionVersion {
+ return logical.ErrorResponse("version for export is below minimun decryption version"), logical.ErrInvalidRequest
+ }
+ key, ok := p.Keys[versionValue]
+ if !ok {
+ return logical.ErrorResponse("version does not exist or cannot be found"), logical.ErrInvalidRequest
+ }
+
+ exportKey, err := getExportKey(p, &key, exportType)
+ if err != nil {
+ return nil, err
+ }
+
+ retKeys[strconv.Itoa(versionValue)] = exportKey
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "name": p.Name,
+ "type": p.Type.String(),
+ "keys": retKeys,
+ },
+ }
+
+ return resp, nil
+}
+
+func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType string) (string, error) {
+ if policy == nil {
+ return "", errors.New("nil policy provided")
+ }
+
+ switch exportType {
+ case exportTypeHMACKey:
+ return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.HMACKey)), nil
+
+ case exportTypeEncryptionKey:
+ switch policy.Type {
+ case keysutil.KeyType_AES256_GCM96:
+ return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.AESKey)), nil
+ }
+
+ case exportTypeSigningKey:
+ switch policy.Type {
+ case keysutil.KeyType_ECDSA_P256:
+ ecKey, err := keyEntryToECPrivateKey(key, elliptic.P256())
+ if err != nil {
+ return "", err
+ }
+ return ecKey, nil
+ }
+ }
+
+ return "", fmt.Errorf("unknown key type %v", policy.Type)
+}
+
+func keyEntryToECPrivateKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, error) {
+ if k == nil {
+ return "", errors.New("nil KeyEntry provided")
+ }
+
+ privKey := &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: curve,
+ X: k.EC_X,
+ Y: k.EC_Y,
+ },
+ D: k.EC_D,
+ }
+ ecder, err := x509.MarshalECPrivateKey(privKey)
+ if err != nil {
+ return "", err
+ }
+ if ecder == nil {
+ return "", errors.New("No data returned when marshalling to private key")
+ }
+
+ block := pem.Block{
+ Type: "EC PRIVATE KEY",
+ Bytes: ecder,
+ }
+ return strings.TrimSpace(string(pem.EncodeToMemory(&block))), nil
+}
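+
+// Illustrative only: an exported ECDSA signing key is rendered as a PEM block
+// of this shape (the body is elided as a placeholder):
+//
+//   -----BEGIN EC PRIVATE KEY-----
+//   MHcCAQEE...
+//   -----END EC PRIVATE KEY-----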
+
+const pathExportHelpSyn = `Export named encryption or signing key`
+
+const pathExportHelpDesc = `
+This path is used to export the named keys that are configured as
+exportable.
+`
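+
+// Illustrative only: hedged read examples against a hypothetical exportable
+// key named "my-key" on a "transit/" mount:
+//
+//   $ vault read transit/export/encryption-key/my-key          # all valid versions
+//   $ vault read transit/export/encryption-key/my-key/latest   # newest version only
+//   $ vault read transit/export/signing-key/my-key/1           # one specific version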
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go
new file mode 100644
index 0000000..e021ac6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go
@@ -0,0 +1,407 @@
+package transit
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestTransit_Export_KeyVersion_ExportsCorrectVersion(t *testing.T) {
+ verifyExportsCorrectVersion(t, "encryption-key", "aes256-gcm96")
+ verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p256")
+ verifyExportsCorrectVersion(t, "hmac-key", "aes256-gcm96")
+ verifyExportsCorrectVersion(t, "hmac-key", "ecdsa-p256")
+}
+
+func verifyExportsCorrectVersion(t *testing.T, exportType, keyType string) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ // First create a key, v1
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/foo",
+ }
+ req.Data = map[string]interface{}{
+ "exportable": true,
+ "type": keyType,
+ }
+ _, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ verifyVersion := func(versionRequest string, expectedVersion int) {
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.ReadOperation,
+ Path: fmt.Sprintf("export/%s/foo/%s", exportType, versionRequest),
+ }
+ rsp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ typRaw, ok := rsp.Data["type"]
+ if !ok {
+ t.Fatal("no type returned from export")
+ }
+ typ, ok := typRaw.(string)
+ if !ok {
+ t.Fatalf("could not find key type, resp data is %#v", rsp.Data)
+ }
+ if typ != keyType {
+ t.Fatalf("key type mismatch; %q vs %q", typ, keyType)
+ }
+
+ keysRaw, ok := rsp.Data["keys"]
+ if !ok {
+ t.Fatal("could not find keys value")
+ }
+ keys, ok := keysRaw.(map[string]string)
+ if !ok {
+ t.Fatal("could not cast to keys map")
+ }
+ if len(keys) != 1 {
+ t.Fatal("unexpected number of keys found")
+ }
+
+ for k := range keys {
+ if k != strconv.Itoa(expectedVersion) {
+ t.Fatalf("expected version %q, received version %q", strconv.Itoa(expectedVersion), k)
+ }
+ }
+ }
+
+ verifyVersion("v1", 1)
+ verifyVersion("1", 1)
+ verifyVersion("latest", 1)
+
+ req.Path = "keys/foo/rotate"
+ // v2
+ _, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ verifyVersion("v1", 1)
+ verifyVersion("1", 1)
+ verifyVersion("v2", 2)
+ verifyVersion("2", 2)
+ verifyVersion("latest", 2)
+
+ // v3
+ _, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ verifyVersion("v1", 1)
+ verifyVersion("1", 1)
+ verifyVersion("v3", 3)
+ verifyVersion("3", 3)
+ verifyVersion("latest", 3)
+}
+
+func TestTransit_Export_ValidVersionsOnly(t *testing.T) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ // First create a key, v1
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/foo",
+ }
+ req.Data = map[string]interface{}{
+ "exportable": true,
+ }
+ _, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req.Path = "keys/foo/rotate"
+ // v2
+ _, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // v3
+ _, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ verifyExport := func(validVersions []int) {
+ req = &logical.Request{
+ Storage: storage,
+ Operation: logical.ReadOperation,
+ Path: "export/encryption-key/foo",
+ }
+ rsp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, ok := rsp.Data["keys"]; !ok {
+ t.Error("no keys returned from export")
+ }
+
+ keys, ok := rsp.Data["keys"].(map[string]string)
+ if !ok {
+ t.Error("could not cast to keys object")
+ }
+ if len(keys) != len(validVersions) {
+ t.Errorf("expected %d key count, received %d", len(validVersions), len(keys))
+ }
+ for _, version := range validVersions {
+ if _, ok := keys[strconv.Itoa(version)]; !ok {
+ t.Errorf("expecting to find key version %d, not found", version)
+ }
+ }
+ }
+
+ verifyExport([]int{1, 2, 3})
+
+ req = &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/foo/config",
+ }
+ req.Data = map[string]interface{}{
+ "min_decryption_version": 3,
+ }
+ _, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ verifyExport([]int{3})
+
+ req = &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/foo/config",
+ }
+ req.Data = map[string]interface{}{
+ "min_decryption_version": 2,
+ }
+ _, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ verifyExport([]int{2, 3})
+
+ req = &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/foo/rotate",
+ }
+ // v4
+ _, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ verifyExport([]int{2, 3, 4})
+}
+
+func TestTransit_Export_KeysNotMarkedExportable_ReturnsError(t *testing.T) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/foo",
+ }
+ req.Data = map[string]interface{}{
+ "exportable": false,
+ }
+ _, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req = &logical.Request{
+ Storage: storage,
+ Operation: logical.ReadOperation,
+ Path: "export/encryption-key/foo",
+ }
+ rsp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !rsp.IsError() {
+ t.Fatal("Key not marked as exportble but was exported.")
+ }
+}
+
+func TestTransit_Export_SigningDoesNotSupportSigning_ReturnsError(t *testing.T) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/foo",
+ }
+ req.Data = map[string]interface{}{
+ "exportable": true,
+ "type": "aes256-gcm96",
+ }
+ _, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req = &logical.Request{
+ Storage: storage,
+ Operation: logical.ReadOperation,
+ Path: "export/signing-key/foo",
+ }
+ _, err = b.HandleRequest(req)
+ if err == nil {
+ t.Fatal("Key does not support signing but was exported without error.")
+ }
+}
+
+func TestTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testing.T) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/foo",
+ }
+ req.Data = map[string]interface{}{
+ "exportable": true,
+ "type": "ecdsa-p256",
+ }
+ _, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req = &logical.Request{
+ Storage: storage,
+ Operation: logical.ReadOperation,
+ Path: "export/encryption-key/foo",
+ }
+ _, err = b.HandleRequest(req)
+ if err == nil {
+ t.Fatal("Key does not support encryption but was exported without error.")
+ }
+}
+
+func TestTransit_Export_KeysDoesNotExist_ReturnsNotFound(t *testing.T) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.ReadOperation,
+ Path: "export/encryption-key/foo",
+ }
+ rsp, err := b.HandleRequest(req)
+
+	if rsp != nil || err != nil {
+		t.Fatalf("expected nil response and nil error for a missing key, got resp %#v, err %v", rsp, err)
+	}
+}
+
+func TestTransit_Export_EncryptionKey_DoesNotExportHMACKey(t *testing.T) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/foo",
+ }
+ req.Data = map[string]interface{}{
+ "exportable": true,
+ "type": "aes256-gcm96",
+ }
+ _, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req = &logical.Request{
+ Storage: storage,
+ Operation: logical.ReadOperation,
+ Path: "export/encryption-key/foo",
+ }
+ encryptionKeyRsp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.Path = "export/hmac-key/foo"
+ hmacKeyRsp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ encryptionKeys, ok := encryptionKeyRsp.Data["keys"].(map[string]string)
+ if !ok {
+ t.Error("could not cast to keys object")
+ }
+ hmacKeys, ok := hmacKeyRsp.Data["keys"].(map[string]string)
+ if !ok {
+ t.Error("could not cast to keys object")
+ }
+ if len(hmacKeys) != len(encryptionKeys) {
+ t.Errorf("hmac (%d) and encyryption (%d) key count don't match",
+ len(hmacKeys), len(encryptionKeys))
+ }
+
+ if reflect.DeepEqual(encryptionKeyRsp.Data, hmacKeyRsp.Data) {
+ t.Fatal("Encryption key data matched hmac key data")
+ }
+}
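
The export tests above address a key version three equivalent ways ("v1", "1", "latest"). A minimal sketch of the path construction they exercise; the `exportPath` helper is hypothetical, introduced here only for illustration:

```go
package main

import "fmt"

// exportPath builds the transit export read path used in the tests above.
// The version selector may be "latest", a bare number ("1"), or a
// v-prefixed number ("v1"); all three address the same key version.
func exportPath(exportType, name, version string) string {
	return fmt.Sprintf("export/%s/%s/%s", exportType, name, version)
}

func main() {
	for _, v := range []string{"v1", "1", "latest"} {
		fmt.Println(exportPath("encryption-key", "foo", v))
	}
}
```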
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hash.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hash.go
new file mode 100644
index 0000000..566ac52
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hash.go
@@ -0,0 +1,116 @@
+package transit
+
+import (
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "hash"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func (b *backend) pathHash() *framework.Path {
+ return &framework.Path{
+ Pattern: "hash" + framework.OptionalParamRegex("urlalgorithm"),
+ Fields: map[string]*framework.FieldSchema{
+ "input": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The base64-encoded input data",
+ },
+
+ "algorithm": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "sha2-256",
+ Description: `Algorithm to use (POST body parameter). Valid values are:
+
+* sha2-224
+* sha2-256
+* sha2-384
+* sha2-512
+
+Defaults to "sha2-256".`,
+ },
+
+ "urlalgorithm": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Algorithm to use (POST URL parameter)`,
+ },
+
+ "format": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "hex",
+ Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "hex".`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathHashWrite,
+ },
+
+ HelpSynopsis: pathHashHelpSyn,
+ HelpDescription: pathHashHelpDesc,
+ }
+}
+
+func (b *backend) pathHashWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ inputB64 := d.Get("input").(string)
+ format := d.Get("format").(string)
+ algorithm := d.Get("urlalgorithm").(string)
+ if algorithm == "" {
+ algorithm = d.Get("algorithm").(string)
+ }
+
+ input, err := base64.StdEncoding.DecodeString(inputB64)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
+ }
+
+ switch format {
+ case "hex":
+ case "base64":
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("unsupported encoding format %s; must be \"hex\" or \"base64\"", format)), nil
+ }
+
+ var hf hash.Hash
+ switch algorithm {
+ case "sha2-224":
+ hf = sha256.New224()
+ case "sha2-256":
+ hf = sha256.New()
+ case "sha2-384":
+ hf = sha512.New384()
+ case "sha2-512":
+ hf = sha512.New()
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
+ }
+ hf.Write(input)
+ retBytes := hf.Sum(nil)
+
+ var retStr string
+ switch format {
+ case "hex":
+ retStr = hex.EncodeToString(retBytes)
+ case "base64":
+ retStr = base64.StdEncoding.EncodeToString(retBytes)
+ }
+
+ // Generate the response
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "sum": retStr,
+ },
+ }
+ return resp, nil
+}
+
+const pathHashHelpSyn = `Generate a hash sum for input data`
+
+const pathHashHelpDesc = `
+Generates a hash sum of the given algorithm against the given input data.
+`
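
For reference, the hashing pipeline in `pathHashWrite` reduces to three standard-library calls. A minimal standalone sketch of the default case (sha2-256, hex output), using the test vector from path_hash_test.go below:

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	// Same pipeline as pathHashWrite with its defaults: base64-decode
	// the input, hash it, hex-encode the digest.
	input, err := base64.StdEncoding.DecodeString("dGhlIHF1aWNrIGJyb3duIGZveA==") // "the quick brown fox"
	if err != nil {
		log.Fatal(err)
	}
	sum := sha256.Sum256(input)
	fmt.Println(hex.EncodeToString(sum[:]))
	// Matches the sha2-256 value asserted in path_hash_test.go:
	// 9ecb36561341d18eb65484e833efea61edc74b84cf5e6ae1b81c63533e25fc8f
}
```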
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hash_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hash_test.go
new file mode 100644
index 0000000..d59976d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hash_test.go
@@ -0,0 +1,87 @@
+package transit
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestTransit_Hash(t *testing.T) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "hash",
+ Data: map[string]interface{}{
+ "input": "dGhlIHF1aWNrIGJyb3duIGZveA==",
+ },
+ }
+
+ doRequest := func(req *logical.Request, errExpected bool, expected string) {
+ resp, err := b.HandleRequest(req)
+ if err != nil && !errExpected {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if errExpected {
+ if !resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ return
+ }
+ if resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ sum, ok := resp.Data["sum"]
+ if !ok {
+ t.Fatal("no sum key found in returned data")
+ }
+ if sum.(string) != expected {
+ t.Fatal("mismatched hashes")
+ }
+ }
+
+ // Test defaults -- sha2-256
+ doRequest(req, false, "9ecb36561341d18eb65484e833efea61edc74b84cf5e6ae1b81c63533e25fc8f")
+
+ // Test algorithm selection in the path
+ req.Path = "hash/sha2-224"
+ doRequest(req, false, "ea074a96cabc5a61f8298a2c470f019074642631a49e1c5e2f560865")
+
+ // Reset and test algorithm selection in the data
+ req.Path = "hash"
+ req.Data["algorithm"] = "sha2-224"
+ doRequest(req, false, "ea074a96cabc5a61f8298a2c470f019074642631a49e1c5e2f560865")
+
+ req.Data["algorithm"] = "sha2-384"
+ doRequest(req, false, "15af9ec8be783f25c583626e9491dbf129dd6dd620466fdf05b3a1d0bb8381d30f4d3ec29f923ff1e09a0f6b337365a6")
+
+ req.Data["algorithm"] = "sha2-512"
+ doRequest(req, false, "d9d380f29b97ad6a1d92e987d83fa5a02653301e1006dd2bcd51afa59a9147e9caedaf89521abc0f0b682adcd47fb512b8343c834a32f326fe9bef00542ce887")
+
+ // Test returning as base64
+ req.Data["format"] = "base64"
+ doRequest(req, false, "2dOA8puXrWodkumH2D+loCZTMB4QBt0rzVGvpZqRR+nK7a+JUhq8DwtoKtzUf7USuDQ8g0oy8yb+m+8AVCzohw==")
+
+ // Test bad input/format/algorithm
+ req.Data["format"] = "base92"
+ doRequest(req, true, "")
+
+ req.Data["format"] = "hex"
+ req.Data["algorithm"] = "foobar"
+ doRequest(req, true, "")
+
+ req.Data["algorithm"] = "sha2-256"
+ req.Data["input"] = "foobar"
+ doRequest(req, true, "")
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go
new file mode 100644
index 0000000..31c156f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go
@@ -0,0 +1,211 @@
+package transit
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/base64"
+ "fmt"
+ "hash"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func (b *backend) pathHMAC() *framework.Path {
+ return &framework.Path{
+ Pattern: "hmac/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The key to use for the HMAC function",
+ },
+
+ "input": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The base64-encoded input data",
+ },
+
+ "algorithm": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "sha2-256",
+ Description: `Algorithm to use (POST body parameter). Valid values are:
+
+* sha2-224
+* sha2-256
+* sha2-384
+* sha2-512
+
+Defaults to "sha2-256".`,
+ },
+
+ "urlalgorithm": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Algorithm to use (POST URL parameter)`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathHMACWrite,
+ },
+
+ HelpSynopsis: pathHMACHelpSyn,
+ HelpDescription: pathHMACHelpDesc,
+ }
+}
+
+func (b *backend) pathHMACWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ inputB64 := d.Get("input").(string)
+ algorithm := d.Get("urlalgorithm").(string)
+ if algorithm == "" {
+ algorithm = d.Get("algorithm").(string)
+ }
+
+ input, err := base64.StdEncoding.DecodeString(inputB64)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
+ }
+
+ // Get the policy
+ p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ }
+
+ key, err := p.HMACKey(p.LatestVersion)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+ if key == nil {
+ return nil, fmt.Errorf("HMAC key value could not be computed")
+ }
+
+ var hf hash.Hash
+ switch algorithm {
+ case "sha2-224":
+ hf = hmac.New(sha256.New224, key)
+ case "sha2-256":
+ hf = hmac.New(sha256.New, key)
+ case "sha2-384":
+ hf = hmac.New(sha512.New384, key)
+ case "sha2-512":
+ hf = hmac.New(sha512.New, key)
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
+ }
+ hf.Write(input)
+ retBytes := hf.Sum(nil)
+
+ retStr := base64.StdEncoding.EncodeToString(retBytes)
+ retStr = fmt.Sprintf("vault:v%s:%s", strconv.Itoa(p.LatestVersion), retStr)
+
+ // Generate the response
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "hmac": retStr,
+ },
+ }
+ return resp, nil
+}
+
+func (b *backend) pathHMACVerify(
+ req *logical.Request, d *framework.FieldData, verificationHMAC string) (*logical.Response, error) {
+
+ name := d.Get("name").(string)
+ inputB64 := d.Get("input").(string)
+ algorithm := d.Get("urlalgorithm").(string)
+ if algorithm == "" {
+ algorithm = d.Get("algorithm").(string)
+ }
+
+ input, err := base64.StdEncoding.DecodeString(inputB64)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
+ }
+
+ // Verify the prefix
+ if !strings.HasPrefix(verificationHMAC, "vault:v") {
+ return logical.ErrorResponse("invalid HMAC to verify: no prefix"), logical.ErrInvalidRequest
+ }
+
+ splitVerificationHMAC := strings.SplitN(strings.TrimPrefix(verificationHMAC, "vault:v"), ":", 2)
+ if len(splitVerificationHMAC) != 2 {
+ return logical.ErrorResponse("invalid HMAC: wrong number of fields"), logical.ErrInvalidRequest
+ }
+
+ ver, err := strconv.Atoi(splitVerificationHMAC[0])
+ if err != nil {
+ return logical.ErrorResponse("invalid HMAC: version number could not be decoded"), logical.ErrInvalidRequest
+ }
+
+ verBytes, err := base64.StdEncoding.DecodeString(splitVerificationHMAC[1])
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("unable to decode verification HMAC as base64: %s", err)), logical.ErrInvalidRequest
+ }
+
+ // Get the policy
+ p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ }
+
+ if ver > p.LatestVersion {
+ return logical.ErrorResponse("invalid HMAC: version is too new"), logical.ErrInvalidRequest
+ }
+
+ if p.MinDecryptionVersion > 0 && ver < p.MinDecryptionVersion {
+ return logical.ErrorResponse("cannot verify HMAC: version is too old (disallowed by policy)"), logical.ErrInvalidRequest
+ }
+
+ key, err := p.HMACKey(ver)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+ if key == nil {
+ return nil, fmt.Errorf("HMAC key value could not be computed")
+ }
+
+ var hf hash.Hash
+ switch algorithm {
+ case "sha2-224":
+ hf = hmac.New(sha256.New224, key)
+ case "sha2-256":
+ hf = hmac.New(sha256.New, key)
+ case "sha2-384":
+ hf = hmac.New(sha512.New384, key)
+ case "sha2-512":
+ hf = hmac.New(sha512.New, key)
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
+ }
+ hf.Write(input)
+ retBytes := hf.Sum(nil)
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "valid": hmac.Equal(retBytes, verBytes),
+ },
+ }, nil
+}
+
+const pathHMACHelpSyn = `Generate an HMAC for input data using the named key`
+
+const pathHMACHelpDesc = `
+Generates an HMAC sum of the given algorithm and key against the given input data.
+`
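
The HMAC response format is `vault:v<key version>:<base64 digest>`. A minimal sketch of the default sha2-256 case, using the fixed key the test file below installs; it should reproduce the v1 value that test asserts:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	// pathHMACWrite emits "vault:v<key version>:<base64 HMAC>".
	key := []byte("01234567890123456789012345678901") // fixed key from path_hmac_test.go
	h := hmac.New(sha256.New, key)
	h.Write([]byte("the quick brown fox")) // the decoded test input
	fmt.Printf("vault:v%d:%s\n", 1, base64.StdEncoding.EncodeToString(h.Sum(nil)))
	// Expected per path_hmac_test.go:
	// vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=
}
```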
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac_test.go
new file mode 100644
index 0000000..1dfeb9b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac_test.go
@@ -0,0 +1,183 @@
+package transit
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestTransit_HMAC(t *testing.T) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ // First create a key
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/foo",
+ }
+ _, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Now, change the key value to something we control
+ p, lock, err := b.lm.GetPolicyShared(storage, "foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ // We don't care as we're the only one using this
+ lock.RUnlock()
+ keyEntry := p.Keys[p.LatestVersion]
+ keyEntry.HMACKey = []byte("01234567890123456789012345678901")
+ p.Keys[p.LatestVersion] = keyEntry
+ if err = p.Persist(storage); err != nil {
+ t.Fatal(err)
+ }
+
+ req.Path = "hmac/foo"
+ req.Data = map[string]interface{}{
+ "input": "dGhlIHF1aWNrIGJyb3duIGZveA==",
+ }
+
+ doRequest := func(req *logical.Request, errExpected bool, expected string) {
+ path := req.Path
+ defer func() { req.Path = path }()
+
+ resp, err := b.HandleRequest(req)
+ if err != nil && !errExpected {
+ panic(fmt.Sprintf("%v", err))
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if errExpected {
+ if !resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ return
+ }
+ if resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ value, ok := resp.Data["hmac"]
+ if !ok {
+ t.Fatalf("no hmac key found in returned data, got resp data %#v", resp.Data)
+ }
+ if value.(string) != expected {
+ panic(fmt.Sprintf("mismatched hashes; expected %s, got resp data %#v", expected, resp.Data))
+ }
+
+ // Now verify
+ req.Path = strings.Replace(req.Path, "hmac", "verify", -1)
+ req.Data["hmac"] = value.(string)
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("%v: %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.Data["valid"].(bool) == false {
+ panic(fmt.Sprintf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp))
+ }
+ }
+
+ // Comparisons are against values generated via openssl
+
+ // Test defaults -- sha2-256
+ doRequest(req, false, "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=")
+
+ // Test algorithm selection in the path
+ req.Path = "hmac/foo/sha2-224"
+ doRequest(req, false, "vault:v1:3p+ZWVquYDvu2dSTCa65Y3fgoMfIAc6fNaBbtg==")
+
+ // Reset and test algorithm selection in the data
+ req.Path = "hmac/foo"
+ req.Data["algorithm"] = "sha2-224"
+ doRequest(req, false, "vault:v1:3p+ZWVquYDvu2dSTCa65Y3fgoMfIAc6fNaBbtg==")
+
+ req.Data["algorithm"] = "sha2-384"
+ doRequest(req, false, "vault:v1:jDB9YXdPjpmr29b1JCIEJO93IydlKVfD9mA2EO9OmJtJQg3QAV5tcRRRb7IQGW9p")
+
+ req.Data["algorithm"] = "sha2-512"
+ doRequest(req, false, "vault:v1:PSXLXvkvKF4CpU65e2bK1tGBZQpcpCEM32fq2iUoiTyQQCfBcGJJItQ+60tMwWXAPQrC290AzTrNJucGrr4GFA==")
+
+	// The HMAC endpoint has no "format" field, so this extra parameter is
+	// ignored and the output is unchanged
+ req.Data["format"] = "base64"
+ doRequest(req, false, "vault:v1:PSXLXvkvKF4CpU65e2bK1tGBZQpcpCEM32fq2iUoiTyQQCfBcGJJItQ+60tMwWXAPQrC290AzTrNJucGrr4GFA==")
+
+ req.Data["algorithm"] = "foobar"
+ doRequest(req, true, "")
+
+ req.Data["algorithm"] = "sha2-256"
+ req.Data["input"] = "foobar"
+ doRequest(req, true, "")
+ req.Data["input"] = "dGhlIHF1aWNrIGJyb3duIGZveA=="
+
+ // Rotate
+ err = p.Rotate(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+ keyEntry = p.Keys[2]
+ // Set to another value we control
+ keyEntry.HMACKey = []byte("12345678901234567890123456789012")
+ p.Keys[2] = keyEntry
+ if err = p.Persist(storage); err != nil {
+ t.Fatal(err)
+ }
+
+ doRequest(req, false, "vault:v2:Dt+mO/B93kuWUbGMMobwUNX5Wodr6dL3JH4DMfpQ0kw=")
+
+ // Verify a previous version
+ req.Path = "verify/foo"
+
+ req.Data["hmac"] = "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4="
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("%v: %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.Data["valid"].(bool) == false {
+ t.Fatalf("error validating hmac\nreq\n%#v\nresp\n%#v", *req, *resp)
+ }
+
+ // Try a bad value
+ req.Data["hmac"] = "vault:v1:UcBvm4VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4="
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("%v: %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.Data["valid"].(bool) {
+ t.Fatalf("expected error validating hmac")
+ }
+
+ // Set min decryption version, attempt to verify
+ p.MinDecryptionVersion = 2
+ if err = p.Persist(storage); err != nil {
+ t.Fatal(err)
+ }
+
+ req.Data["hmac"] = "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4="
+ resp, err = b.HandleRequest(req)
+ if err == nil {
+ t.Fatalf("expected an error, got response %#v", resp)
+ }
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("expected invalid request error, got %v", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go
new file mode 100644
index 0000000..a69c555
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go
@@ -0,0 +1,233 @@
+package transit
+
+import (
+ "crypto/elliptic"
+ "fmt"
+ "strconv"
+
+ "github.com/hashicorp/vault/helper/keysutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func (b *backend) pathListKeys() *framework.Path {
+ return &framework.Path{
+ Pattern: "keys/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathKeysList,
+ },
+
+ HelpSynopsis: pathPolicyHelpSyn,
+ HelpDescription: pathPolicyHelpDesc,
+ }
+}
+
+func (b *backend) pathKeys() *framework.Path {
+ return &framework.Path{
+ Pattern: "keys/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the key",
+ },
+
+ "type": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "aes256-gcm96",
+ Description: `The type of key to create. Currently,
+"aes256-gcm96" (symmetric) and "ecdsa-p256" (asymmetric) are
+supported. Defaults to "aes256-gcm96".`,
+ },
+
+ "derived": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `Enables key derivation mode. This
+allows for per-transaction unique
+keys for encryption operations.`,
+ },
+
+ "convergent_encryption": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `Whether to support convergent encryption.
+This is only supported when using a key with
+key derivation enabled and will require all
+requests to carry both a context and 96-bit
+(12-byte) nonce. The given nonce will be used
+in place of a randomly generated nonce. As a
+result, when the same context and nonce are
+supplied, the same ciphertext is generated. It
+is *very important* when using this mode that
+you ensure that all nonces are unique for a
+given context. Failing to do so will severely
+impact the ciphertext's security.`,
+ },
+
+ "exportable": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `Enables keys to be exportable.
+This allows for all the valid keys
+in the key ring to be exported.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathPolicyWrite,
+ logical.DeleteOperation: b.pathPolicyDelete,
+ logical.ReadOperation: b.pathPolicyRead,
+ },
+
+ HelpSynopsis: pathPolicyHelpSyn,
+ HelpDescription: pathPolicyHelpDesc,
+ }
+}
+
+func (b *backend) pathKeysList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entries, err := req.Storage.List("policy/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(entries), nil
+}
+
+func (b *backend) pathPolicyWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ derived := d.Get("derived").(bool)
+ convergent := d.Get("convergent_encryption").(bool)
+ keyType := d.Get("type").(string)
+ exportable := d.Get("exportable").(bool)
+
+ if !derived && convergent {
+ return logical.ErrorResponse("convergent encryption requires derivation to be enabled"), nil
+ }
+
+ polReq := keysutil.PolicyRequest{
+ Storage: req.Storage,
+ Name: name,
+ Derived: derived,
+ Convergent: convergent,
+ Exportable: exportable,
+ }
+ switch keyType {
+ case "aes256-gcm96":
+ polReq.KeyType = keysutil.KeyType_AES256_GCM96
+ case "ecdsa-p256":
+ polReq.KeyType = keysutil.KeyType_ECDSA_P256
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest
+ }
+
+ p, lock, upserted, err := b.lm.GetPolicyUpsert(polReq)
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return nil, fmt.Errorf("error generating key: returned policy was nil")
+ }
+
+ resp := &logical.Response{}
+ if !upserted {
+ resp.AddWarning(fmt.Sprintf("key %s already existed", name))
+ }
+
+	return resp, nil
+}
+
+func (b *backend) pathPolicyRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+
+ p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return nil, nil
+ }
+
+ // Return the response
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "name": p.Name,
+ "type": p.Type.String(),
+ "derived": p.Derived,
+ "deletion_allowed": p.DeletionAllowed,
+ "min_decryption_version": p.MinDecryptionVersion,
+ "latest_version": p.LatestVersion,
+ "exportable": p.Exportable,
+ "supports_encryption": p.Type.EncryptionSupported(),
+ "supports_decryption": p.Type.DecryptionSupported(),
+ "supports_signing": p.Type.SigningSupported(),
+ "supports_derivation": p.Type.DerivationSupported(),
+ },
+ }
+
+ if p.Derived {
+ switch p.KDF {
+ case keysutil.Kdf_hmac_sha256_counter:
+ resp.Data["kdf"] = "hmac-sha256-counter"
+ resp.Data["kdf_mode"] = "hmac-sha256-counter"
+ case keysutil.Kdf_hkdf_sha256:
+ resp.Data["kdf"] = "hkdf_sha256"
+ }
+ resp.Data["convergent_encryption"] = p.ConvergentEncryption
+ if p.ConvergentEncryption {
+ resp.Data["convergent_encryption_version"] = p.ConvergentVersion
+ }
+ }
+
+ switch p.Type {
+ case keysutil.KeyType_AES256_GCM96:
+ retKeys := map[string]int64{}
+ for k, v := range p.Keys {
+ retKeys[strconv.Itoa(k)] = v.CreationTime
+ }
+ resp.Data["keys"] = retKeys
+
+ case keysutil.KeyType_ECDSA_P256:
+ type ecdsaKey struct {
+ Name string `json:"name"`
+ PublicKey string `json:"public_key"`
+ }
+ retKeys := map[string]ecdsaKey{}
+ for k, v := range p.Keys {
+ retKeys[strconv.Itoa(k)] = ecdsaKey{
+ Name: elliptic.P256().Params().Name,
+ PublicKey: v.FormattedPublicKey,
+ }
+ }
+ resp.Data["keys"] = retKeys
+ }
+
+ return resp, nil
+}
+
+func (b *backend) pathPolicyDelete(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+
+ // Delete does its own locking
+ err := b.lm.DeletePolicy(req.Storage, name)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("error deleting policy %s: %s", name, err)), err
+ }
+
+ return nil, nil
+}
+
+const pathPolicyHelpSyn = `Manage named encryption keys`
+
+const pathPolicyHelpDesc = `
+This path is used to manage the named keys that are available.
+Doing a write with no value against a new named key will create
+it using a randomly generated key.
+`
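
The `convergent_encryption` field description above warns that nonces must be unique per context. A small standalone sketch (plain AES-GCM, not the transit policy code) of why: a fixed key and fixed nonce make encryption fully deterministic, which is the intended convergent property but also why nonce reuse across different plaintexts is dangerous:

```go
package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"log"
)

func main() {
	key := make([]byte, 32) // AES-256 key
	if _, err := rand.Read(key); err != nil {
		log.Fatal(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		log.Fatal(err)
	}
	nonce := make([]byte, gcm.NonceSize()) // 96-bit nonce, as in the field docs
	ct1 := gcm.Seal(nil, nonce, []byte("the quick brown fox"), nil)
	ct2 := gcm.Seal(nil, nonce, []byte("the quick brown fox"), nil)
	fmt.Println("deterministic:", bytes.Equal(ct1, ct2)) // true
}
```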
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_random.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_random.go
new file mode 100644
index 0000000..f9190b7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_random.go
@@ -0,0 +1,97 @@
+package transit
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "strconv"
+
+ uuid "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func (b *backend) pathRandom() *framework.Path {
+ return &framework.Path{
+ Pattern: "random" + framework.OptionalParamRegex("urlbytes"),
+ Fields: map[string]*framework.FieldSchema{
+ "urlbytes": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The number of bytes to generate (POST URL parameter)",
+ },
+
+ "bytes": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Default: 32,
+ Description: "The number of bytes to generate (POST body parameter). Defaults to 32 (256 bits).",
+ },
+
+ "format": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "base64",
+ Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "base64".`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRandomWrite,
+ },
+
+ HelpSynopsis: pathRandomHelpSyn,
+ HelpDescription: pathRandomHelpDesc,
+ }
+}
+
+func (b *backend) pathRandomWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ bytes := 0
+ var err error
+ strBytes := d.Get("urlbytes").(string)
+ if strBytes != "" {
+ bytes, err = strconv.Atoi(strBytes)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("error parsing url-set byte count: %s", err)), nil
+ }
+ } else {
+ bytes = d.Get("bytes").(int)
+ }
+ format := d.Get("format").(string)
+
+ if bytes < 1 {
+ return logical.ErrorResponse(`"bytes" cannot be less than 1`), nil
+ }
+
+ switch format {
+ case "hex":
+ case "base64":
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("unsupported encoding format %s; must be \"hex\" or \"base64\"", format)), nil
+ }
+
+ randBytes, err := uuid.GenerateRandomBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ var retStr string
+ switch format {
+ case "hex":
+ retStr = hex.EncodeToString(randBytes)
+ case "base64":
+ retStr = base64.StdEncoding.EncodeToString(randBytes)
+ }
+
+ // Generate the response
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "random_bytes": retStr,
+ },
+ }
+ return resp, nil
+}
+
+const pathRandomHelpSyn = `Generate random bytes`
+
+const pathRandomHelpDesc = `
+This function can be used to generate high-entropy random bytes.
+`
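
The endpoint above lets a URL parameter override the body's byte count and offers hex or base64 output. A minimal standalone sketch of the same semantics for 24 bytes, which encode to 48 hex characters (matching the `random/24` case in the test file below):

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	buf := make([]byte, 24) // URL-supplied count overrides the body default of 32
	if _, err := rand.Read(buf); err != nil {
		log.Fatal(err)
	}
	fmt.Println("hex:   ", hex.EncodeToString(buf)) // 48 characters
	fmt.Println("base64:", base64.StdEncoding.EncodeToString(buf))
}
```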
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_random_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_random_test.go
new file mode 100644
index 0000000..a2711ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_random_test.go
@@ -0,0 +1,98 @@
+package transit
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestTransit_Random(t *testing.T) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "random",
+ Data: map[string]interface{}{},
+ }
+
+ doRequest := func(req *logical.Request, errExpected bool, format string, numBytes int) {
+ getResponse := func() []byte {
+ resp, err := b.HandleRequest(req)
+ if err != nil && !errExpected {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if errExpected {
+ if !resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ return nil
+ }
+ if resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ if _, ok := resp.Data["random_bytes"]; !ok {
+ t.Fatal("no random_bytes found in response")
+ }
+
+ outputStr := resp.Data["random_bytes"].(string)
+ var outputBytes []byte
+ switch format {
+ case "base64":
+ outputBytes, err = base64.StdEncoding.DecodeString(outputStr)
+ case "hex":
+ outputBytes, err = hex.DecodeString(outputStr)
+ default:
+ t.Fatal("unknown format")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return outputBytes
+ }
+
+ rand1 := getResponse()
+ // Expected error
+ if rand1 == nil {
+ return
+ }
+ rand2 := getResponse()
+ if len(rand1) != numBytes || len(rand2) != numBytes {
+ t.Fatal("length of output random bytes not what is exepcted")
+ }
+ if reflect.DeepEqual(rand1, rand2) {
+ t.Fatal("found identical ouputs")
+ }
+ }
+
+ // Test defaults
+ doRequest(req, false, "base64", 32)
+
+ // Test size selection in the path
+ req.Path = "random/24"
+ req.Data["format"] = "hex"
+ doRequest(req, false, "hex", 24)
+
+ // Test bad input/format
+ req.Path = "random"
+ req.Data["format"] = "base92"
+ doRequest(req, true, "", 0)
+
+ req.Data["format"] = "hex"
+ req.Data["bytes"] = -1
+ doRequest(req, true, "", 0)
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go
new file mode 100644
index 0000000..167656a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go
@@ -0,0 +1,179 @@
+package transit
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/mitchellh/mapstructure"
+)
+
+func (b *backend) pathRewrap() *framework.Path {
+ return &framework.Path{
+ Pattern: "rewrap/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the key",
+ },
+
+ "ciphertext": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Ciphertext value to rewrap",
+ },
+
+ "context": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Base64 encoded context for key derivation. Required for derived keys.",
+ },
+
+ "nonce": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Nonce for when convergent encryption is used",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRewrapWrite,
+ },
+
+ HelpSynopsis: pathRewrapHelpSyn,
+ HelpDescription: pathRewrapHelpDesc,
+ }
+}
+
+func (b *backend) pathRewrapWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ batchInputRaw := d.Raw["batch_input"]
+ var batchInputItems []BatchRequestItem
+ var err error
+ if batchInputRaw != nil {
+ err = mapstructure.Decode(batchInputRaw, &batchInputItems)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse batch input: %v", err)
+ }
+
+ if len(batchInputItems) == 0 {
+ return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest
+ }
+ } else {
+ ciphertext := d.Get("ciphertext").(string)
+ if len(ciphertext) == 0 {
+ return logical.ErrorResponse("missing ciphertext to decrypt"), logical.ErrInvalidRequest
+ }
+
+ batchInputItems = make([]BatchRequestItem, 1)
+ batchInputItems[0] = BatchRequestItem{
+ Ciphertext: ciphertext,
+ Context: d.Get("context").(string),
+ Nonce: d.Get("nonce").(string),
+ }
+ }
+
+ batchResponseItems := make([]BatchResponseItem, len(batchInputItems))
+ contextSet := len(batchInputItems[0].Context) != 0
+
+ for i, item := range batchInputItems {
+ if (len(item.Context) == 0 && contextSet) || (len(item.Context) != 0 && !contextSet) {
+ return logical.ErrorResponse("context should be set either in all the request blocks or in none"), logical.ErrInvalidRequest
+ }
+
+ if item.Ciphertext == "" {
+ batchResponseItems[i].Error = "missing ciphertext to decrypt"
+ continue
+ }
+
+ // Decode the context
+ if len(item.Context) != 0 {
+ batchInputItems[i].DecodedContext, err = base64.StdEncoding.DecodeString(item.Context)
+ if err != nil {
+ batchResponseItems[i].Error = err.Error()
+ continue
+ }
+ }
+
+ // Decode the nonce
+ if len(item.Nonce) != 0 {
+ batchInputItems[i].DecodedNonce, err = base64.StdEncoding.DecodeString(item.Nonce)
+ if err != nil {
+ batchResponseItems[i].Error = err.Error()
+ continue
+ }
+ }
+ }
+
+ // Get the policy
+ p, lock, err := b.lm.GetPolicyShared(req.Storage, d.Get("name").(string))
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ }
+
+ for i, item := range batchInputItems {
+ if batchResponseItems[i].Error != "" {
+ continue
+ }
+
+ plaintext, err := p.Decrypt(item.DecodedContext, item.DecodedNonce, item.Ciphertext)
+ if err != nil {
+ switch err.(type) {
+ case errutil.UserError:
+ batchResponseItems[i].Error = err.Error()
+ continue
+ default:
+ return nil, err
+ }
+ }
+
+ ciphertext, err := p.Encrypt(item.DecodedContext, item.DecodedNonce, plaintext)
+ if err != nil {
+ switch err.(type) {
+ case errutil.UserError:
+ batchResponseItems[i].Error = err.Error()
+ continue
+ case errutil.InternalError:
+ return nil, err
+ default:
+ return nil, err
+ }
+ }
+
+ if ciphertext == "" {
+ return nil, fmt.Errorf("empty ciphertext returned for input item %d", i)
+ }
+
+ batchResponseItems[i].Ciphertext = ciphertext
+ }
+
+ resp := &logical.Response{}
+ if batchInputRaw != nil {
+ resp.Data = map[string]interface{}{
+ "batch_results": batchResponseItems,
+ }
+ } else {
+ if batchResponseItems[0].Error != "" {
+ return logical.ErrorResponse(batchResponseItems[0].Error), logical.ErrInvalidRequest
+ }
+ resp.Data = map[string]interface{}{
+ "ciphertext": batchResponseItems[0].Ciphertext,
+ }
+ }
+
+ return resp, nil
+}
+
+const pathRewrapHelpSyn = `Rewrap ciphertext`
+
+const pathRewrapHelpDesc = `
+After key rotation, this function can be used to rewrap the given ciphertext or
+a batch of given ciphertext blocks with the latest version of the named key.
+If the given ciphertext is already using the latest version of the key, this
+function is a no-op.
+`
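
`pathRewrapWrite` accepts either a single ciphertext in the top-level fields or a list of items under `batch_input` (decoded via mapstructure above). A sketch of the two request shapes; the ciphertext strings are placeholders, not real transit output:

```go
package main

import "fmt"

func main() {
	// Single-item shape: top-level fields.
	single := map[string]interface{}{
		"ciphertext": "vault:v1:abc...",
		"context":    "dmlzaGFsCg==", // base64 context, only for derived keys
	}
	// Batch shape: a list of maps under "batch_input".
	batch := map[string]interface{}{
		"batch_input": []interface{}{
			map[string]interface{}{"ciphertext": "vault:v1:abc..."},
			map[string]interface{}{"ciphertext": "vault:v1:def..."},
		},
	}
	fmt.Println(single, batch)
}
```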
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap_test.go
new file mode 100644
index 0000000..ae4d002
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap_test.go
@@ -0,0 +1,294 @@
+package transit
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+// Check the normal flow of rewrap
+func TestTransit_BatchRewrapCase1(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, s := createBackendWithStorage(t)
+
+ // Upsert the key and encrypt the data
+ plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+
+ encData := map[string]interface{}{
+ "plaintext": plaintext,
+ }
+
+ // Create a key and encrypt a plaintext
+ encReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: encData,
+ }
+ resp, err = b.HandleRequest(encReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Cache the ciphertext
+ ciphertext := resp.Data["ciphertext"]
+ if !strings.HasPrefix(ciphertext.(string), "vault:v1") {
+ t.Fatalf("bad: ciphertext version: expected: 'vault:v1', actual: %s", ciphertext)
+ }
+
+ rewrapData := map[string]interface{}{
+ "ciphertext": ciphertext,
+ }
+
+ // Read the policy and check if the latest version is 1
+ policyReq := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "keys/upserted_key",
+ Storage: s,
+ }
+
+ resp, err = b.HandleRequest(policyReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["latest_version"] != 1 {
+ t.Fatalf("bad: latest_version: expected: 1, actual: %d", resp.Data["latest_version"])
+ }
+
+ rotateReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "keys/upserted_key/rotate",
+ Storage: s,
+ }
+ resp, err = b.HandleRequest(rotateReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Read the policy again and the latest version is 2
+ resp, err = b.HandleRequest(policyReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["latest_version"] != 2 {
+ t.Fatalf("bad: latest_version: expected: 2, actual: %d", resp.Data["latest_version"])
+ }
+
+ // Rewrap the ciphertext and check that they are different
+ rewrapReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "rewrap/upserted_key",
+ Storage: s,
+ Data: rewrapData,
+ }
+
+ resp, err = b.HandleRequest(rewrapReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if ciphertext.(string) == resp.Data["ciphertext"].(string) {
+ t.Fatalf("bad: ciphertexts are same before and after rewrap")
+ }
+
+ if !strings.HasPrefix(resp.Data["ciphertext"].(string), "vault:v2") {
+ t.Fatalf("bad: ciphertext version: expected: 'vault:v2', actual: %s", resp.Data["ciphertext"].(string))
+ }
+}
+
+// Check the normal flow of rewrap with upserted key
+func TestTransit_BatchRewrapCase2(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ b, s := createBackendWithStorage(t)
+
+ // Upsert the key and encrypt the data
+ plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+
+ encData := map[string]interface{}{
+ "plaintext": plaintext,
+ "context": "dmlzaGFsCg==",
+ }
+
+ // Create a key and encrypt a plaintext
+ encReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: encData,
+ }
+ resp, err = b.HandleRequest(encReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Cache the ciphertext
+ ciphertext := resp.Data["ciphertext"]
+ if !strings.HasPrefix(ciphertext.(string), "vault:v1") {
+ t.Fatalf("bad: ciphertext version: expected: 'vault:v1', actual: %s", ciphertext)
+ }
+
+ rewrapData := map[string]interface{}{
+ "ciphertext": ciphertext,
+ "context": "dmlzaGFsCg==",
+ }
+
+ // Read the policy and check if the latest version is 1
+ policyReq := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "keys/upserted_key",
+ Storage: s,
+ }
+
+ resp, err = b.HandleRequest(policyReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["latest_version"] != 1 {
+ t.Fatalf("bad: latest_version: expected: 1, actual: %d", resp.Data["latest_version"])
+ }
+
+ rotateReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "keys/upserted_key/rotate",
+ Storage: s,
+ }
+ resp, err = b.HandleRequest(rotateReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Read the policy again and the latest version is 2
+ resp, err = b.HandleRequest(policyReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if resp.Data["latest_version"] != 2 {
+ t.Fatalf("bad: latest_version: expected: 2, actual: %d", resp.Data["latest_version"])
+ }
+
+ // Rewrap the ciphertext and check that they are different
+ rewrapReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "rewrap/upserted_key",
+ Storage: s,
+ Data: rewrapData,
+ }
+
+ resp, err = b.HandleRequest(rewrapReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ if ciphertext.(string) == resp.Data["ciphertext"].(string) {
+ t.Fatalf("bad: ciphertexts are same before and after rewrap")
+ }
+
+ if !strings.HasPrefix(resp.Data["ciphertext"].(string), "vault:v2") {
+ t.Fatalf("bad: ciphertext version: expected: 'vault:v2', actual: %s", resp.Data["ciphertext"].(string))
+ }
+}
+
+// Batch encrypt plaintexts, rotate the keys and rewrap all the ciphertexts
+func TestTransit_BatchRewrapCase3(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ b, s := createBackendWithStorage(t)
+
+ batchEncryptionInput := []interface{}{
+ map[string]interface{}{"plaintext": "dmlzaGFsCg=="},
+ map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
+ }
+ batchEncryptionData := map[string]interface{}{
+ "batch_input": batchEncryptionInput,
+ }
+ batchReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "encrypt/upserted_key",
+ Storage: s,
+ Data: batchEncryptionData,
+ }
+ resp, err = b.HandleRequest(batchReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchEncryptionResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
+
+ batchRewrapInput := make([]interface{}, len(batchEncryptionResponseItems))
+ for i, item := range batchEncryptionResponseItems {
+ batchRewrapInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext}
+ }
+
+ batchRewrapData := map[string]interface{}{
+ "batch_input": batchRewrapInput,
+ }
+
+ rotateReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "keys/upserted_key/rotate",
+ Storage: s,
+ }
+ resp, err = b.HandleRequest(rotateReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ rewrapReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "rewrap/upserted_key",
+ Storage: s,
+ Data: batchRewrapData,
+ }
+
+ resp, err = b.HandleRequest(rewrapReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ batchRewrapResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
+
+ if len(batchRewrapResponseItems) != len(batchEncryptionResponseItems) {
+ t.Fatalf("bad: length of input and output or rewrap are not matching; expected: %d, actual: %d", len(batchEncryptionResponseItems), len(batchRewrapResponseItems))
+ }
+
+ decReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "decrypt/upserted_key",
+ Storage: s,
+ }
+
+ for i, eItem := range batchEncryptionResponseItems {
+ rItem := batchRewrapResponseItems[i]
+
+ if eItem.Ciphertext == rItem.Ciphertext {
+ t.Fatalf("bad: rewrap input and output are the same")
+ }
+
+ if !strings.HasPrefix(rItem.Ciphertext, "vault:v2") {
+ t.Fatalf("bad: invalid version of ciphertext in rewrap response; expected: 'vault:v2', actual: %s", rItem.Ciphertext)
+ }
+
+ decReq.Data = map[string]interface{}{
+ "ciphertext": rItem.Ciphertext,
+ }
+
+ resp, err = b.HandleRequest(decReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ plaintext1 := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+ plaintext2 := "dmlzaGFsCg=="
+ if resp.Data["plaintext"] != plaintext1 && resp.Data["plaintext"] != plaintext2 {
+ t.Fatalf("bad: plaintext. Expected: %q or %q, Actual: %q", plaintext1, plaintext2, resp.Data["plaintext"])
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rotate.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rotate.go
new file mode 100644
index 0000000..743fcf2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rotate.go
@@ -0,0 +1,55 @@
+package transit
+
+import (
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func (b *backend) pathRotate() *framework.Path {
+ return &framework.Path{
+ Pattern: "keys/" + framework.GenericNameRegex("name") + "/rotate",
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the key",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathRotateWrite,
+ },
+
+ HelpSynopsis: pathRotateHelpSyn,
+ HelpDescription: pathRotateHelpDesc,
+ }
+}
+
+func (b *backend) pathRotateWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+
+ // Get the policy
+ p, lock, err := b.lm.GetPolicyExclusive(req.Storage, name)
+ if lock != nil {
+ defer lock.Unlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse("key not found"), logical.ErrInvalidRequest
+ }
+
+ // Rotate the policy
+ err = p.Rotate(req.Storage)
+
+ return nil, err
+}
+
+const pathRotateHelpSyn = `Rotate named encryption key`
+
+const pathRotateHelpDesc = `
+This path is used to rotate the named key. After rotation,
+new encryption requests using this name will use the new key,
+but decryption will still be supported for older versions.
+`
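
Per the rotate and rewrap help text, new encryptions use the latest key version while older ciphertexts still decrypt, and rewrapping a latest-version ciphertext is a no-op. A hedged sketch of client-side triage using the `vault:v<N>:` prefix; `needsRewrap` is a hypothetical helper, not part of the backend:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// needsRewrap reports whether a transit ciphertext ("vault:v<N>:...") was
// produced by a key version older than latest, i.e. whether a rewrap call
// would actually change it.
func needsRewrap(ciphertext string, latest int) (bool, error) {
	if !strings.HasPrefix(ciphertext, "vault:v") {
		return false, fmt.Errorf("not a transit ciphertext")
	}
	parts := strings.SplitN(strings.TrimPrefix(ciphertext, "vault:v"), ":", 2)
	if len(parts) != 2 {
		return false, fmt.Errorf("malformed ciphertext")
	}
	ver, err := strconv.Atoi(parts[0])
	if err != nil {
		return false, err
	}
	return ver < latest, nil
}

func main() {
	ok, _ := needsRewrap("vault:v1:abc...", 2)
	fmt.Println(ok) // true: the key rotated past v1, so rewrap is worthwhile
}
```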
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go
new file mode 100644
index 0000000..ff01880
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go
@@ -0,0 +1,258 @@
+package transit
+
+import (
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/base64"
+ "fmt"
+ "hash"
+
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func (b *backend) pathSign() *framework.Path {
+ return &framework.Path{
+ Pattern: "sign/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The key to use",
+ },
+
+ "input": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The base64-encoded input data",
+ },
+
+ "algorithm": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "sha2-256",
+ Description: `Hash algorithm to use (POST body parameter). Valid values are:
+
+* sha2-224
+* sha2-256
+* sha2-384
+* sha2-512
+
+Defaults to "sha2-256".`,
+ },
+
+ "urlalgorithm": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Hash algorithm to use (POST URL parameter)`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathSignWrite,
+ },
+
+ HelpSynopsis: pathSignHelpSyn,
+ HelpDescription: pathSignHelpDesc,
+ }
+}
+
+func (b *backend) pathVerify() *framework.Path {
+ return &framework.Path{
+ Pattern: "verify/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The key to use",
+ },
+
+ "signature": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The signature, including vault header/key version",
+ },
+
+ "hmac": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The HMAC, including vault header/key version",
+ },
+
+ "input": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The base64-encoded input data to verify",
+ },
+
+ "urlalgorithm": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Hash algorithm to use (POST URL parameter)`,
+ },
+
+ "algorithm": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "sha2-256",
+ Description: `Hash algorithm to use (POST body parameter). Valid values are:
+
+* sha2-224
+* sha2-256
+* sha2-384
+* sha2-512
+
+Defaults to "sha2-256".`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathVerifyWrite,
+ },
+
+ HelpSynopsis: pathVerifyHelpSyn,
+ HelpDescription: pathVerifyHelpDesc,
+ }
+}
+
+func (b *backend) pathSignWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+ inputB64 := d.Get("input").(string)
+ algorithm := d.Get("urlalgorithm").(string)
+ if algorithm == "" {
+ algorithm = d.Get("algorithm").(string)
+ }
+
+ input, err := base64.StdEncoding.DecodeString(inputB64)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
+ }
+
+ var hf hash.Hash
+ switch algorithm {
+ case "sha2-224":
+ hf = sha256.New224()
+ case "sha2-256":
+ hf = sha256.New()
+ case "sha2-384":
+ hf = sha512.New384()
+ case "sha2-512":
+ hf = sha512.New()
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
+ }
+ hf.Write(input)
+ hashedInput := hf.Sum(nil)
+
+ // Get the policy
+ p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ }
+
+ if !p.Type.SigningSupported() {
+ return logical.ErrorResponse(fmt.Sprintf("key type %v does not support signing", p.Type)), logical.ErrInvalidRequest
+ }
+
+ sig, err := p.Sign(hashedInput)
+ if err != nil {
+ return nil, err
+ }
+ if sig == "" {
+ return nil, fmt.Errorf("signature could not be computed")
+ }
+
+ // Generate the response
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "signature": sig,
+ },
+ }
+ return resp, nil
+}
+
+func (b *backend) pathVerifyWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ sig := d.Get("signature").(string)
+ hmac := d.Get("hmac").(string)
+ switch {
+ case sig != "" && hmac != "":
+ return logical.ErrorResponse("provide one of 'signature' or 'hmac'"), logical.ErrInvalidRequest
+
+ case sig == "" && hmac == "":
+ return logical.ErrorResponse("neither a 'signature' nor an 'hmac' were given to verify"), logical.ErrInvalidRequest
+
+ case hmac != "":
+ return b.pathHMACVerify(req, d, hmac)
+ }
+
+ name := d.Get("name").(string)
+ inputB64 := d.Get("input").(string)
+ algorithm := d.Get("urlalgorithm").(string)
+ if algorithm == "" {
+ algorithm = d.Get("algorithm").(string)
+ }
+
+ input, err := base64.StdEncoding.DecodeString(inputB64)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
+ }
+
+ var hf hash.Hash
+ switch algorithm {
+ case "sha2-224":
+ hf = sha256.New224()
+ case "sha2-256":
+ hf = sha256.New()
+ case "sha2-384":
+ hf = sha512.New384()
+ case "sha2-512":
+ hf = sha512.New()
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
+ }
+ hf.Write(input)
+ hashedInput := hf.Sum(nil)
+
+ // Get the policy
+ p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ }
+
+ valid, err := p.VerifySignature(hashedInput, sig)
+ if err != nil {
+ switch err.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ case errutil.InternalError:
+ return nil, err
+ default:
+ return nil, err
+ }
+ }
+
+ // Generate the response
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "valid": valid,
+ },
+ }
+ return resp, nil
+}
+
+const pathSignHelpSyn = `Generate a signature for input data using the named key`
+
+const pathSignHelpDesc = `
+Generates a signature of the input data using the named key and the given hash algorithm.
+`
+const pathVerifyHelpSyn = `Verify a signature or HMAC for input data created using the named key`
+
+const pathVerifyHelpDesc = `
+Verifies a signature or HMAC of the input data using the named key and the given hash algorithm.
+`
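
For ecdsa-p256 keys, sign and verify both hash the decoded input with the selected algorithm and then operate on the digest with the P-256 key. A standalone sketch with a throwaway local key (transit additionally wraps the signature as `vault:v<N>:...`, omitted here):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"log"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// Hash first (default sha2-256), then sign the digest, as the
	// handlers above do before calling p.Sign / p.VerifySignature.
	digest := sha256.Sum256([]byte("the quick brown fox"))
	r, s, err := ecdsa.Sign(rand.Reader, key, digest[:])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("valid:", ecdsa.Verify(&key.PublicKey, digest[:], r, s)) // true
}
```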
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go
new file mode 100644
index 0000000..3a41c28
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go
@@ -0,0 +1,201 @@
+package transit
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestTransit_SignVerify(t *testing.T) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ // First create a key
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/foo",
+ Data: map[string]interface{}{
+ "type": "ecdsa-p256",
+ },
+ }
+ _, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Now, change the key value to something we control
+ p, lock, err := b.lm.GetPolicyShared(storage, "foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Release the read lock immediately; the test is the only user of this policy
+ lock.RUnlock()
+
+ // Useful code to output a key for openssl verification
+ /*
+ {
+ key := p.Keys[p.LatestVersion]
+ keyBytes, _ := x509.MarshalECPrivateKey(&ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: elliptic.P256(),
+ X: key.X,
+ Y: key.Y,
+ },
+ D: key.D,
+ })
+ pemBlock := &pem.Block{
+ Type: "EC PRIVATE KEY",
+ Bytes: keyBytes,
+ }
+ pemBytes := pem.EncodeToMemory(pemBlock)
+ t.Fatalf("X: %s, Y: %s, D: %s, marshaled: %s", key.X.Text(16), key.Y.Text(16), key.D.Text(16), string(pemBytes))
+ }
+ */
+
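+ // Swap in fixed ECDSA P-256 key material so the results can be compared
+ // against signatures produced by openssl with the same key.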
+ keyEntry := p.Keys[p.LatestVersion]
+ _, ok := keyEntry.EC_X.SetString("7336010a6da5935113d26d9ea4bb61b3b8d102c9a8083ed432f9b58fd7e80686", 16)
+ if !ok {
+ t.Fatal("could not set X")
+ }
+ _, ok = keyEntry.EC_Y.SetString("4040aa31864691a8a9e7e3ec9250e85425b797ad7be34ba8df62bfbad45ebb0e", 16)
+ if !ok {
+ t.Fatal("could not set Y")
+ }
+ _, ok = keyEntry.EC_D.SetString("99e5569be8683a2691dfc560ca9dfa71e887867a3af60635a08a3e3655aba3ef", 16)
+ if !ok {
+ t.Fatal("could not set D")
+ }
+ p.Keys[p.LatestVersion] = keyEntry
+ if err = p.Persist(storage); err != nil {
+ t.Fatal(err)
+ }
+ req.Data = map[string]interface{}{
+ "input": "dGhlIHF1aWNrIGJyb3duIGZveA==",
+ }
+
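+ // signRequest signs the current payload at "sign/foo"+postpath and
+ // returns the signature; with errExpected set it asserts an error
+ // response and returns "".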
+ signRequest := func(req *logical.Request, errExpected bool, postpath string) string {
+ req.Path = "sign/foo" + postpath
+ resp, err := b.HandleRequest(req)
+ if err != nil && !errExpected {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if errExpected {
+ if !resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ return ""
+ }
+ if resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ value, ok := resp.Data["signature"]
+ if !ok {
+ t.Fatalf("no signature key found in returned data, got resp data %#v", resp.Data)
+ }
+ return value.(string)
+ }
+
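+ // verifyRequest checks sig against the current payload at
+ // "verify/foo"+postpath, asserting success or failure per errExpected.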
+ verifyRequest := func(req *logical.Request, errExpected bool, postpath, sig string) {
+ req.Path = "verify/foo" + postpath
+ req.Data["signature"] = sig
+ resp, err := b.HandleRequest(req)
+ if err != nil && !errExpected {
+ t.Fatalf("got error: %v, sig was %v", err, sig)
+ }
+ if errExpected {
+ if resp != nil && !resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ return
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ value, ok := resp.Data["valid"]
+ if !ok {
+ t.Fatalf("no valid key found in returned data, got resp data %#v", resp.Data)
+ }
+ if !value.(bool) && !errExpected {
+ t.Fatalf("verification failed; req was %#v, resp is %#v", *req, *resp)
+ }
+ }
+
+ // Comparisons are against values generated via openssl
+
+ // Test defaults -- sha2-256
+ sig := signRequest(req, false, "")
+ verifyRequest(req, false, "", sig)
+
+ // Test a bad signature
+ verifyRequest(req, true, "", sig[0:len(sig)-2])
+
+ // Test a signature generated with the same key by openssl
+ sig = `vault:v1:MEUCIAgnEl9V8P305EBAlz68Nq4jZng5fE8k6MactcnlUw9dAiEAvJVePg3dazW6MaW7lRAVtEz82QJDVmR98tXCl8Pc7DA=`
+ verifyRequest(req, false, "", sig)
+
+ // Test algorithm selection in the path
+ sig = signRequest(req, false, "/sha2-224")
+ verifyRequest(req, false, "/sha2-224", sig)
+
+ // Reset and test algorithm selection in the data
+ req.Data["algorithm"] = "sha2-224"
+ sig = signRequest(req, false, "")
+ verifyRequest(req, false, "", sig)
+
+ req.Data["algorithm"] = "sha2-384"
+ sig = signRequest(req, false, "")
+ verifyRequest(req, false, "", sig)
+
+ // Test 512 and save sig for later to ensure we can't validate once min
+ // decryption version is set
+ req.Data["algorithm"] = "sha2-512"
+ sig = signRequest(req, false, "")
+ verifyRequest(req, false, "", sig)
+
+ v1sig := sig
+
+ // Test bad algorithm
+ req.Data["algorithm"] = "foobar"
+ signRequest(req, true, "")
+
+ // Test bad input
+ req.Data["algorithm"] = "sha2-256"
+ req.Data["input"] = "foobar"
+ signRequest(req, true, "")
+
+ // Rotate and set min decryption version
+ err = p.Rotate(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = p.Rotate(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ p.MinDecryptionVersion = 2
+ if err = p.Persist(storage); err != nil {
+ t.Fatal(err)
+ }
+
+ req.Data["input"] = "dGhlIHF1aWNrIGJyb3duIGZveA=="
+ req.Data["algorithm"] = "sha2-256"
+ // Make sure signing still works fine
+ sig = signRequest(req, false, "")
+ verifyRequest(req, false, "", sig)
+ // Now try the v1
+ verifyRequest(req, true, "", v1sig)
+}
diff --git a/vendor/github.com/hashicorp/vault/cli/commands.go b/vendor/github.com/hashicorp/vault/cli/commands.go
new file mode 100644
index 0000000..7494c06
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/cli/commands.go
@@ -0,0 +1,334 @@
+package cli
+
+import (
+ "os"
+
+ auditFile "github.com/hashicorp/vault/builtin/audit/file"
+ auditSocket "github.com/hashicorp/vault/builtin/audit/socket"
+ auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog"
+ "github.com/hashicorp/vault/version"
+
+ credAppId "github.com/hashicorp/vault/builtin/credential/app-id"
+ credAppRole "github.com/hashicorp/vault/builtin/credential/approle"
+ credAws "github.com/hashicorp/vault/builtin/credential/aws"
+ credCert "github.com/hashicorp/vault/builtin/credential/cert"
+ credGitHub "github.com/hashicorp/vault/builtin/credential/github"
+ credLdap "github.com/hashicorp/vault/builtin/credential/ldap"
+ credOkta "github.com/hashicorp/vault/builtin/credential/okta"
+ credRadius "github.com/hashicorp/vault/builtin/credential/radius"
+ credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
+
+ "github.com/hashicorp/vault/builtin/logical/aws"
+ "github.com/hashicorp/vault/builtin/logical/cassandra"
+ "github.com/hashicorp/vault/builtin/logical/consul"
+ "github.com/hashicorp/vault/builtin/logical/mongodb"
+ "github.com/hashicorp/vault/builtin/logical/mssql"
+ "github.com/hashicorp/vault/builtin/logical/mysql"
+ "github.com/hashicorp/vault/builtin/logical/pki"
+ "github.com/hashicorp/vault/builtin/logical/postgresql"
+ "github.com/hashicorp/vault/builtin/logical/rabbitmq"
+ "github.com/hashicorp/vault/builtin/logical/ssh"
+ "github.com/hashicorp/vault/builtin/logical/transit"
+
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/command"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/meta"
+ "github.com/mitchellh/cli"
+)
+
+// Commands returns the mapping of CLI commands for Vault. The meta
+// parameter lets you set meta options for all commands.
+func Commands(metaPtr *meta.Meta) map[string]cli.CommandFactory {
+ if metaPtr == nil {
+ metaPtr = &meta.Meta{
+ TokenHelper: command.DefaultTokenHelper,
+ }
+ }
+
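+ // Fall back to a basic stdout/stderr UI when the caller did not
+ // provide one.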
+ if metaPtr.Ui == nil {
+ metaPtr.Ui = &cli.BasicUi{
+ Writer: os.Stdout,
+ ErrorWriter: os.Stderr,
+ }
+ }
+
+ return map[string]cli.CommandFactory{
+ "init": func() (cli.Command, error) {
+ return &command.InitCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "server": func() (cli.Command, error) {
+ return &command.ServerCommand{
+ Meta: *metaPtr,
+ AuditBackends: map[string]audit.Factory{
+ "file": auditFile.Factory,
+ "syslog": auditSyslog.Factory,
+ "socket": auditSocket.Factory,
+ },
+ CredentialBackends: map[string]logical.Factory{
+ "approle": credAppRole.Factory,
+ "cert": credCert.Factory,
+ "aws": credAws.Factory,
+ "app-id": credAppId.Factory,
+ "github": credGitHub.Factory,
+ "userpass": credUserpass.Factory,
+ "ldap": credLdap.Factory,
+ "okta": credOkta.Factory,
+ "radius": credRadius.Factory,
+ },
+ LogicalBackends: map[string]logical.Factory{
+ "aws": aws.Factory,
+ "consul": consul.Factory,
+ "postgresql": postgresql.Factory,
+ "cassandra": cassandra.Factory,
+ "pki": pki.Factory,
+ "transit": transit.Factory,
+ "mongodb": mongodb.Factory,
+ "mssql": mssql.Factory,
+ "mysql": mysql.Factory,
+ "ssh": ssh.Factory,
+ "rabbitmq": rabbitmq.Factory,
+ },
+ ShutdownCh: command.MakeShutdownCh(),
+ SighupCh: command.MakeSighupCh(),
+ }, nil
+ },
+
+ "ssh": func() (cli.Command, error) {
+ return &command.SSHCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "path-help": func() (cli.Command, error) {
+ return &command.PathHelpCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "auth": func() (cli.Command, error) {
+ return &command.AuthCommand{
+ Meta: *metaPtr,
+ Handlers: map[string]command.AuthHandler{
+ "github": &credGitHub.CLIHandler{},
+ "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"},
+ "ldap": &credLdap.CLIHandler{},
+ "okta": &credOkta.CLIHandler{},
+ "cert": &credCert.CLIHandler{},
+ "aws": &credAws.CLIHandler{},
+ "radius": &credUserpass.CLIHandler{DefaultMount: "radius"},
+ },
+ }, nil
+ },
+
+ "auth-enable": func() (cli.Command, error) {
+ return &command.AuthEnableCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "auth-disable": func() (cli.Command, error) {
+ return &command.AuthDisableCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "audit-list": func() (cli.Command, error) {
+ return &command.AuditListCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "audit-disable": func() (cli.Command, error) {
+ return &command.AuditDisableCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "audit-enable": func() (cli.Command, error) {
+ return &command.AuditEnableCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "key-status": func() (cli.Command, error) {
+ return &command.KeyStatusCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "policies": func() (cli.Command, error) {
+ return &command.PolicyListCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "policy-delete": func() (cli.Command, error) {
+ return &command.PolicyDeleteCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "policy-write": func() (cli.Command, error) {
+ return &command.PolicyWriteCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "read": func() (cli.Command, error) {
+ return &command.ReadCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "unwrap": func() (cli.Command, error) {
+ return &command.UnwrapCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "list": func() (cli.Command, error) {
+ return &command.ListCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "write": func() (cli.Command, error) {
+ return &command.WriteCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "delete": func() (cli.Command, error) {
+ return &command.DeleteCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "rekey": func() (cli.Command, error) {
+ return &command.RekeyCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "generate-root": func() (cli.Command, error) {
+ return &command.GenerateRootCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "renew": func() (cli.Command, error) {
+ return &command.RenewCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "revoke": func() (cli.Command, error) {
+ return &command.RevokeCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "seal": func() (cli.Command, error) {
+ return &command.SealCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "status": func() (cli.Command, error) {
+ return &command.StatusCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "unseal": func() (cli.Command, error) {
+ return &command.UnsealCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "step-down": func() (cli.Command, error) {
+ return &command.StepDownCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "mount": func() (cli.Command, error) {
+ return &command.MountCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "mounts": func() (cli.Command, error) {
+ return &command.MountsCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "mount-tune": func() (cli.Command, error) {
+ return &command.MountTuneCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "remount": func() (cli.Command, error) {
+ return &command.RemountCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "rotate": func() (cli.Command, error) {
+ return &command.RotateCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "unmount": func() (cli.Command, error) {
+ return &command.UnmountCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "token-create": func() (cli.Command, error) {
+ return &command.TokenCreateCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "token-lookup": func() (cli.Command, error) {
+ return &command.TokenLookupCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "token-renew": func() (cli.Command, error) {
+ return &command.TokenRenewCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "token-revoke": func() (cli.Command, error) {
+ return &command.TokenRevokeCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "capabilities": func() (cli.Command, error) {
+ return &command.CapabilitiesCommand{
+ Meta: *metaPtr,
+ }, nil
+ },
+
+ "version": func() (cli.Command, error) {
+ versionInfo := version.GetVersion()
+
+ return &command.VersionCommand{
+ VersionInfo: versionInfo,
+ Ui: metaPtr.Ui,
+ }, nil
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/cli/help.go b/vendor/github.com/hashicorp/vault/cli/help.go
new file mode 100644
index 0000000..bd66e33
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/cli/help.go
@@ -0,0 +1,82 @@
+package cli
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/mitchellh/cli"
+)
+
+// HelpFunc is a cli.HelpFunc that can be used to output the help for Vault.
+func HelpFunc(commands map[string]cli.CommandFactory) string {
+ commonNames := map[string]struct{}{
+ "delete": struct{}{},
+ "path-help": struct{}{},
+ "read": struct{}{},
+ "renew": struct{}{},
+ "revoke": struct{}{},
+ "write": struct{}{},
+ "server": struct{}{},
+ "status": struct{}{},
+ "unwrap": struct{}{},
+ }
+
+ // Determine the maximum key length, and classify based on type
+ commonCommands := make(map[string]cli.CommandFactory)
+ otherCommands := make(map[string]cli.CommandFactory)
+ maxKeyLen := 0
+ for key, f := range commands {
+ if len(key) > maxKeyLen {
+ maxKeyLen = len(key)
+ }
+
+ if _, ok := commonNames[key]; ok {
+ commonCommands[key] = f
+ } else {
+ otherCommands[key] = f
+ }
+ }
+
+ var buf bytes.Buffer
+ buf.WriteString("usage: vault [-version] [-help] [args]\n\n")
+ buf.WriteString("Common commands:\n")
+ buf.WriteString(listCommands(commonCommands, maxKeyLen))
+ buf.WriteString("\nAll other commands:\n")
+ buf.WriteString(listCommands(otherCommands, maxKeyLen))
+ return buf.String()
+}
+
+// listCommands just lists the commands in the map with the
+// given maximum key length.
+func listCommands(commands map[string]cli.CommandFactory, maxKeyLen int) string {
+ var buf bytes.Buffer
+
+ // Get the list of keys so we can sort them, and also get the maximum
+ // key length so they can be aligned properly.
+ keys := make([]string, 0, len(commands))
+ for key := range commands {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+
+ for _, key := range keys {
+ commandFunc, ok := commands[key]
+ if !ok {
+ // This should never happen since we JUST built the list of
+ // keys.
+ panic("command not found: " + key)
+ }
+
+ command, err := commandFunc()
+ if err != nil {
+ panic(fmt.Sprintf("command '%s' failed to load: %s", key, err))
+ }
+
+ key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key)))
+ buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis()))
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/hashicorp/vault/cli/main.go b/vendor/github.com/hashicorp/vault/cli/main.go
new file mode 100644
index 0000000..3d0ced3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/cli/main.go
@@ -0,0 +1,51 @@
+package cli
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/mitchellh/cli"
+)
+
+func Run(args []string) int {
+ return RunCustom(args, Commands(nil))
+}
+
+func RunCustom(args []string, commands map[string]cli.CommandFactory) int {
+ // Get the command line args. We shortcut "--version" and "-v" to
+ // just show the version.
+ for _, arg := range args {
+ if arg == "-v" || arg == "-version" || arg == "--version" {
+ newArgs := make([]string, len(args)+1)
+ newArgs[0] = "version"
+ copy(newArgs[1:], args)
+ args = newArgs
+ break
+ }
+ }
+
+ // Build the commands to include in the help now. This is pretty...
+ // tedious, but we don't have a better way at the moment.
+ commandsInclude := make([]string, 0, len(commands))
+ for k := range commands {
+ switch k {
+ case "token-disk":
+ default:
+ commandsInclude = append(commandsInclude, k)
+ }
+ }
+
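+ // Note that "token-disk" is only hidden from the help output; it
+ // remains callable if present in the commands map.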
+ cli := &cli.CLI{
+ Args: args,
+ Commands: commands,
+ HelpFunc: cli.FilteredHelpFunc(commandsInclude, HelpFunc),
+ }
+
+ exitCode, err := cli.Run()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error executing CLI: %s\n", err.Error())
+ return 1
+ }
+
+ return exitCode
+}
diff --git a/vendor/github.com/hashicorp/vault/command/audit_disable.go b/vendor/github.com/hashicorp/vault/command/audit_disable.go
new file mode 100644
index 0000000..31c4457
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/audit_disable.go
@@ -0,0 +1,71 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// AuditDisableCommand is a Command that disables an audit backend.
+type AuditDisableCommand struct {
+ meta.Meta
+}
+
+func (c *AuditDisableCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("mount", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 1 {
+ flags.Usage()
+ c.Ui.Error(fmt.Sprintf(
+ "\naudit-disable expects one argument: the id to disable"))
+ return 1
+ }
+
+ id := args[0]
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ if err := client.Sys().DisableAudit(id); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error disabling audit backend: %s", err))
+ return 2
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "Successfully disabled audit backend '%s' if it was enabled", id))
+
+ return 0
+}
+
+func (c *AuditDisableCommand) Synopsis() string {
+ return "Disable an audit backend"
+}
+
+func (c *AuditDisableCommand) Help() string {
+ helpText := `
+Usage: vault audit-disable [options] id
+
+ Disable an audit backend.
+
+ Once the audit backend is disabled, no more audit logs will be sent to
+ it. The data associated with the audit backend isn't affected.
+
+ The "id" parameter should map to the "path" used in "audit-enable". If
+ no path was provided to "audit-enable" you should use the backend
+ type (e.g. "file").
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/audit_disable_test.go b/vendor/github.com/hashicorp/vault/command/audit_disable_test.go
new file mode 100644
index 0000000..500ee9c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/audit_disable_test.go
@@ -0,0 +1,86 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestAuditDisable(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &AuditDisableCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "noop",
+ }
+
+ // Run once to get the client
+ c.Run(args)
+
+ // Get the client
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %#v", err)
+ }
+ if err := client.Sys().EnableAudit("noop", "noop", "", nil); err != nil {
+ t.Fatalf("err: %#v", err)
+ }
+
+ // Run again
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestAuditDisableWithOptions(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &AuditDisableCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "noop",
+ }
+
+ // Run once to get the client
+ c.Run(args)
+
+ // Get the client
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %#v", err)
+ }
+ if err := client.Sys().EnableAuditWithOptions("noop", &api.EnableAuditOptions{
+ Type: "noop",
+ Description: "noop",
+ }); err != nil {
+ t.Fatalf("err: %#v", err)
+ }
+
+ // Run again
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/audit_enable.go b/vendor/github.com/hashicorp/vault/command/audit_enable.go
new file mode 100644
index 0000000..3293c79
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/audit_enable.go
@@ -0,0 +1,129 @@
+package command
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/kv-builder"
+ "github.com/hashicorp/vault/meta"
+ "github.com/mitchellh/mapstructure"
+)
+
+// AuditEnableCommand is a Command that enables an audit backend.
+type AuditEnableCommand struct {
+ meta.Meta
+
+ // A test stdin that can be used for tests
+ testStdin io.Reader
+}
+
+func (c *AuditEnableCommand) Run(args []string) int {
+ var desc, path string
+ var local bool
+ flags := c.Meta.FlagSet("audit-enable", meta.FlagSetDefault)
+ flags.StringVar(&desc, "description", "", "")
+ flags.StringVar(&path, "path", "", "")
+ flags.BoolVar(&local, "local", false, "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) < 1 {
+ flags.Usage()
+ c.Ui.Error(fmt.Sprintf(
+ "\naudit-enable expects at least one argument: the type to enable"))
+ return 1
+ }
+
+ auditType := args[0]
+ if path == "" {
+ path = auditType
+ }
+
+ // Build the options
+ var stdin io.Reader = os.Stdin
+ if c.testStdin != nil {
+ stdin = c.testStdin
+ }
+ builder := &kvbuilder.Builder{Stdin: stdin}
+ if err := builder.Add(args[1:]...); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error parsing options: %s", err))
+ return 1
+ }
+
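+ // Flatten the parsed key/value pairs into plain string options for
+ // the audit backend.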
+ var opts map[string]string
+ if err := mapstructure.WeakDecode(builder.Map(), &opts); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error parsing options: %s", err))
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 1
+ }
+
+ err = client.Sys().EnableAuditWithOptions(path, &api.EnableAuditOptions{
+ Type: auditType,
+ Description: desc,
+ Options: opts,
+ Local: local,
+ })
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error enabling audit backend: %s", err))
+ return 1
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "Successfully enabled audit backend '%s' with path '%s'!", auditType, path))
+ return 0
+}
+
+func (c *AuditEnableCommand) Synopsis() string {
+ return "Enable an audit backend"
+}
+
+func (c *AuditEnableCommand) Help() string {
+ helpText := `
+Usage: vault audit-enable [options] type [config...]
+
+ Enable an audit backend.
+
+ This command enables an audit backend of type "type". Additional
+ options for configuring the audit backend can be specified after the
+ type in the same format as the "vault write" command in key/value pairs.
+
+ For example, to configure the file audit backend to write audit logs at
+ the path /var/log/audit.log:
+
+ $ vault audit-enable file file_path=/var/log/audit.log
+
+ For information on available configuration options, please see the
+ documentation.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Audit Enable Options:
+
+ -description= A human-friendly description for the backend. This
+ shows up only when querying the enabled backends.
+
+ -path= Specify a unique path for this audit backend. This
+ is purely for referencing this audit backend. By
+ default this will be the backend type.
+
+ -local Mark the mount as a local mount. Local mounts
+ are not replicated nor (if a secondary)
+ removed by replication.
+`
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/audit_enable_test.go b/vendor/github.com/hashicorp/vault/command/audit_enable_test.go
new file mode 100644
index 0000000..118f103
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/audit_enable_test.go
@@ -0,0 +1,56 @@
+package command
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestAuditEnable(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &AuditEnableCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "noop",
+ "foo=bar",
+ }
+
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Get the client
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %#v", err)
+ }
+
+ audits, err := client.Sys().ListAudit()
+ if err != nil {
+ t.Fatalf("err: %#v", err)
+ }
+
+ audit, ok := audits["noop/"]
+ if !ok {
+ t.Fatalf("err: %#v", audits)
+ }
+
+ expected := map[string]string{"foo": "bar"}
+ if !reflect.DeepEqual(audit.Options, expected) {
+ t.Fatalf("err: %#v", audit)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/audit_list.go b/vendor/github.com/hashicorp/vault/command/audit_list.go
new file mode 100644
index 0000000..b9914eb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/audit_list.go
@@ -0,0 +1,87 @@
+package command
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+ "github.com/ryanuber/columnize"
+)
+
+// AuditListCommand is a Command that lists the enabled audits.
+type AuditListCommand struct {
+ meta.Meta
+}
+
+func (c *AuditListCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("audit-list", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ audits, err := client.Sys().ListAudit()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error reading audits: %s", err))
+ return 2
+ }
+
+ if len(audits) == 0 {
+ c.Ui.Error(fmt.Sprintf(
+ "No audit backends are enabled. Use `vault audit-enable` to\n" +
+ "enable an audit backend."))
+ return 1
+ }
+
+ paths := make([]string, 0, len(audits))
+ for path := range audits {
+ paths = append(paths, path)
+ }
+ sort.Strings(paths)
+
+ columns := []string{"Path | Type | Description | Replication Behavior | Options"}
+ for _, path := range paths {
+ audit := audits[path]
+ opts := make([]string, 0, len(audit.Options))
+ for k, v := range audit.Options {
+ opts = append(opts, k+"="+v)
+ }
+ replicatedBehavior := "replicated"
+ if audit.Local {
+ replicatedBehavior = "local"
+ }
+ columns = append(columns, fmt.Sprintf(
+ "%s | %s | %s | %s | %s", audit.Path, audit.Type, audit.Description, replicatedBehavior, strings.Join(opts, " ")))
+ }
+
+ c.Ui.Output(columnize.SimpleFormat(columns))
+ return 0
+}
+
+func (c *AuditListCommand) Synopsis() string {
+ return "Lists enabled audit backends in Vault"
+}
+
+func (c *AuditListCommand) Help() string {
+ helpText := `
+Usage: vault audit-list [options]
+
+ List the enabled audit backends.
+
+ The output lists the enabled audit backends and the options for those
+ backends. The options may contain sensitive information, and therefore
+ only a root Vault user can view this.
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/audit_list_test.go b/vendor/github.com/hashicorp/vault/command/audit_list_test.go
new file mode 100644
index 0000000..01d4f83
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/audit_list_test.go
@@ -0,0 +1,50 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestAuditList(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &AuditListCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+
+ // Run once to get the client
+ c.Run(args)
+
+ // Get the client
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %#v", err)
+ }
+ if err := client.Sys().EnableAuditWithOptions("foo", &api.EnableAuditOptions{
+ Type: "noop",
+ Description: "noop",
+ Options: nil,
+ }); err != nil {
+ t.Fatalf("err: %#v", err)
+ }
+
+ // Run again
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/auth.go b/vendor/github.com/hashicorp/vault/command/auth.go
new file mode 100644
index 0000000..2af8780
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/auth.go
@@ -0,0 +1,413 @@
+package command
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/kv-builder"
+ "github.com/hashicorp/vault/helper/password"
+ "github.com/hashicorp/vault/meta"
+ "github.com/mitchellh/mapstructure"
+ "github.com/ryanuber/columnize"
+)
+
+// AuthHandler is the interface that any auth handlers must implement
+// to enable auth via the CLI.
+type AuthHandler interface {
+ Auth(*api.Client, map[string]string) (string, error)
+ Help() string
+}
+
+// AuthCommand is a Command that handles authentication.
+type AuthCommand struct {
+ meta.Meta
+
+ Handlers map[string]AuthHandler
+
+ // The fields below can be overwritten for tests
+ testStdin io.Reader
+}
+
+func (c *AuthCommand) Run(args []string) int {
+ var method, authPath string
+ var methods, methodHelp, noVerify bool
+ flags := c.Meta.FlagSet("auth", meta.FlagSetDefault)
+ flags.BoolVar(&methods, "methods", false, "")
+ flags.BoolVar(&methodHelp, "method-help", false, "")
+ flags.BoolVar(&noVerify, "no-verify", false, "")
+ flags.StringVar(&method, "method", "", "method")
+ flags.StringVar(&authPath, "path", "", "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ if methods {
+ return c.listMethods()
+ }
+
+ args = flags.Args()
+
+ tokenHelper, err := c.TokenHelper()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing token helper: %s\n\n"+
+ "Please verify that the token helper is available and properly\n"+
+ "configured for your system. Please refer to the documentation\n"+
+ "on token helpers for more information.",
+ err))
+ return 1
+ }
+
+ // token is where the final token will go
+ handler := c.Handlers[method]
+
+ // Read token from stdin if first arg is exactly "-"
+ var stdin io.Reader = os.Stdin
+ if c.testStdin != nil {
+ stdin = c.testStdin
+ }
+
+ if len(args) > 0 && args[0] == "-" {
+ stdinR := bufio.NewReader(stdin)
+ args[0], err = stdinR.ReadString('\n')
+ if err != nil && err != io.EOF {
+ c.Ui.Error(fmt.Sprintf("Error reading from stdin: %s", err))
+ return 1
+ }
+ args[0] = strings.TrimSpace(args[0])
+ }
+
+ if method == "" {
+ token := ""
+ if len(args) > 0 {
+ token = args[0]
+ }
+
+ handler = &tokenAuthHandler{Token: token}
+ args = nil
+
+ switch authPath {
+ case "", "auth/token":
+ default:
+ c.Ui.Error("Token authentication does not support custom paths")
+ return 1
+ }
+ }
+
+ if handler == nil {
+ methods := make([]string, 0, len(c.Handlers))
+ for k := range c.Handlers {
+ methods = append(methods, k)
+ }
+ sort.Strings(methods)
+
+ c.Ui.Error(fmt.Sprintf(
+ "Unknown authentication method: %s\n\n"+
+ "Please use a supported authentication method. The list of supported\n"+
+ "authentication methods is shown below. Note that this list may not\n"+
+ "be exhaustive: Vault may support other auth methods. For auth methods\n"+
+ "unsupported by the CLI, please use the HTTP API.\n\n"+
+ "%s",
+ method,
+ strings.Join(methods, ", ")))
+ return 1
+ }
+
+ if methodHelp {
+ c.Ui.Output(handler.Help())
+ return 0
+ }
+
+ // Warn if the VAULT_TOKEN environment variable is set, as that will take
+ // precedence
+ if os.Getenv("VAULT_TOKEN") != "" {
+ c.Ui.Output("==> WARNING: VAULT_TOKEN environment variable set!\n")
+ c.Ui.Output(" The environment variable takes precedence over the value")
+ c.Ui.Output(" set by the auth command. Either update the value of the")
+ c.Ui.Output(" environment variable or unset it to use the new token.\n")
+ }
+
+ var vars map[string]string
+ if len(args) > 0 {
+ builder := kvbuilder.Builder{Stdin: os.Stdin}
+ if err := builder.Add(args...); err != nil {
+ c.Ui.Error(err.Error())
+ return 1
+ }
+
+ if err := mapstructure.Decode(builder.Map(), &vars); err != nil {
+ c.Ui.Error(fmt.Sprintf("Error parsing options: %s", err))
+ return 1
+ }
+ } else {
+ vars = make(map[string]string)
+ }
+
+ // Build the client so we can auth
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client to auth: %s", err))
+ return 1
+ }
+
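+ // Hand a non-default mount path to the auth handler via the "mount"
+ // option.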
+ if authPath != "" {
+ vars["mount"] = authPath
+ }
+
+ // Authenticate
+ token, err := handler.Auth(client, vars)
+ if err != nil {
+ c.Ui.Error(err.Error())
+ return 1
+ }
+
+ // Cache the previous token so that it can be restored if authentication fails
+ var previousToken string
+ if previousToken, err = tokenHelper.Get(); err != nil {
+ c.Ui.Error(fmt.Sprintf("Error caching the previous token: %s\n\n", err))
+ return 1
+ }
+
+ // Store the token!
+ if err := tokenHelper.Store(token); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error storing token: %s\n\n"+
+ "Authentication was not successful and did not persist.\n"+
+ "Please reauthenticate, or fix the issue above if possible.",
+ err))
+ return 1
+ }
+
+ if noVerify {
+ c.Ui.Output("Authenticated - no token verification has been performed.")
+
+ return 0
+ }
+
+ // Build the client again so it can read the token we just wrote
+ client, err = c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client to verify the token: %s", err))
+ if err := tokenHelper.Store(previousToken); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error restoring the previous token: %s\n\n"+
+ "Please reauthenticate with a valid token.",
+ err))
+ }
+ return 1
+ }
+
+ // Verify the token
+ secret, err := client.Auth().Token().LookupSelf()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error validating token: %s", err))
+ if err := tokenHelper.Store(previousToken); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error restoring the previous token: %s\n\n"+
+ "Please reauthenticate with a valid token.",
+ err))
+ }
+ return 1
+ }
+ if secret == nil {
+ c.Ui.Error(fmt.Sprintf("Error: Invalid token"))
+ if err := tokenHelper.Store(previousToken); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error restoring the previous token: %s\n\n"+
+ "Please reauthenticate with a valid token.",
+ err))
+ }
+ return 1
+ }
+
+ // Get the policies we have
+ policiesRaw, ok := secret.Data["policies"]
+ if !ok {
+ policiesRaw = []string{"unknown"}
+ }
+ var policies []string
+ for _, v := range policiesRaw.([]interface{}) {
+ policies = append(policies, v.(string))
+ }
+
+ output := "Successfully authenticated! You are now logged in."
+ if method != "" {
+ output += "\nThe token below is already saved in the session. You do not"
+ output += "\nneed to \"vault auth\" again with the token."
+ }
+ output += fmt.Sprintf("\ntoken: %s", secret.Data["id"])
+ output += fmt.Sprintf("\ntoken_duration: %s", secret.Data["ttl"].(json.Number).String())
+ if len(policies) > 0 {
+ output += fmt.Sprintf("\ntoken_policies: %v", policies)
+ }
+
+ c.Ui.Output(output)
+
+ return 0
+
+}
+
+func (c *AuthCommand) listMethods() int {
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 1
+ }
+
+ auth, err := client.Sys().ListAuth()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error reading auth table: %s", err))
+ return 1
+ }
+
+ paths := make([]string, 0, len(auth))
+ for path := range auth {
+ paths = append(paths, path)
+ }
+ sort.Strings(paths)
+
+ columns := []string{"Path | Type | Default TTL | Max TTL | Replication Behavior | Description"}
+ for _, path := range paths {
+ auth := auth[path]
+ defTTL := "system"
+ if auth.Config.DefaultLeaseTTL != 0 {
+ defTTL = strconv.Itoa(auth.Config.DefaultLeaseTTL)
+ }
+ maxTTL := "system"
+ if auth.Config.MaxLeaseTTL != 0 {
+ maxTTL = strconv.Itoa(auth.Config.MaxLeaseTTL)
+ }
+ replicatedBehavior := "replicated"
+ if auth.Local {
+ replicatedBehavior = "local"
+ }
+ columns = append(columns, fmt.Sprintf(
+ "%s | %s | %s | %s | %s | %s", path, auth.Type, defTTL, maxTTL, replicatedBehavior, auth.Description))
+ }
+
+ c.Ui.Output(columnize.SimpleFormat(columns))
+ return 0
+}
+
+func (c *AuthCommand) Synopsis() string {
+ return "Prints information about how to authenticate with Vault"
+}
+
+func (c *AuthCommand) Help() string {
+ helpText := `
+Usage: vault auth [options] [auth-information]
+
+ Authenticate with Vault using the given token or via any supported
+ authentication backend.
+
+ By default, the -method is assumed to be token. If not supplied via the
+ command-line, a prompt for input will be shown. If the authentication
+ information is "-", it will be read from stdin.
+
+ The -method option allows alternative authentication methods to be used,
+ such as userpass, GitHub, or TLS certificates. For these, additional
+ values as "key=value" pairs may be required. For example, to authenticate
+ to the userpass auth backend:
+
+ $ vault auth -method=userpass username=my-username
+
+ Use "-method-help" to get help for a specific method.
+
+ If an auth backend is enabled at a different path, the "-method" flag
+ should still point to the canonical name, and the "-path" flag should be
+ used. If a GitHub auth backend was mounted as "github-private", one would
+ authenticate to this backend via:
+
+ $ vault auth -method=github -path=github-private
+
+ The value of the "-path" flag is supplied to auth providers as the "mount"
+ option in the payload to specify the mount point.
+
+General Options:
+
+ ` + meta.GeneralOptionsUsage() + `
+
+Auth Options:
+
+ -method=name Authenticate using the method with the given name
+ (e.g. "userpass" or "github"). If this authentication
+ method is not available, exit with code 1.
+
+ -method-help If set, the help for the selected method will be shown.
+
+ -methods List the available auth methods.
+
+ -no-verify Do not verify the token after creation; avoids a use count
+ decrement.
+
+ -path The path at which the auth backend is enabled. If an auth
+ backend is mounted at multiple paths, this option can be
+ used to authenticate against specific paths.
+`
+ return strings.TrimSpace(helpText)
+}
+
+// tokenAuthHandler handles retrieving the token from the command-line.
+type tokenAuthHandler struct {
+ Token string
+}
+
+func (h *tokenAuthHandler) Auth(*api.Client, map[string]string) (string, error) {
+ token := h.Token
+ if token == "" {
+ var err error
+
+ // No arguments given, read the token from user input
+ fmt.Printf("Token (will be hidden): ")
+ token, err = password.Read(os.Stdin)
+ fmt.Printf("\n")
+ if err != nil {
+ return "", fmt.Errorf(
+ "Error attempting to ask for token. The raw error message\n"+
+ "is shown below, but the most common reason for this error is\n"+
+ "that you attempted to pipe a value into auth. If you want to\n"+
+ "pipe the token, please pass '-' as the token argument.\n\n"+
+ "Raw error: %s", err)
+ }
+ }
+
+ if token == "" {
+ return "", fmt.Errorf(
+ "A token must be passed to auth. Please view the help\n" +
+ "for more information.")
+ }
+
+ return token, nil
+}
+
+func (h *tokenAuthHandler) Help() string {
+ help := `
+No method selected with the "-method" flag, so the "auth" command assumes
+you'll be using raw token authentication. For this, specify the token to
+authenticate as the parameter to "vault auth". Example:
+
+ vault auth 123456
+
+The token used to authenticate must come from some other source. A root
+token is created when Vault is first initialized. After that, subsequent
+tokens are created via the API or command line interface (with the
+"token"-prefixed commands).
+`
+
+ return strings.TrimSpace(help)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/auth_disable.go b/vendor/github.com/hashicorp/vault/command/auth_disable.go
new file mode 100644
index 0000000..621ce59
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/auth_disable.go
@@ -0,0 +1,69 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// AuthDisableCommand is a Command that disables an auth provider.
+type AuthDisableCommand struct {
+ meta.Meta
+}
+
+func (c *AuthDisableCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("auth-disable", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 1 {
+ flags.Usage()
+ c.Ui.Error(fmt.Sprintf(
+ "\nauth-disable expects one argument: the path to disable."))
+ return 1
+ }
+
+ path := args[0]
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ if err := client.Sys().DisableAuth(path); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error: %s", err))
+ return 2
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "Disabled auth provider at path '%s' if it was enabled", path))
+
+ return 0
+}
+
+func (c *AuthDisableCommand) Synopsis() string {
+ return "Disable an auth provider"
+}
+
+func (c *AuthDisableCommand) Help() string {
+ helpText := `
+Usage: vault auth-disable [options] path
+
+ Disable an already-enabled auth provider.
+
+ Once the auth provider is disabled, its path can no longer be used
+ to authenticate. All access tokens generated via the disabled auth provider
+ will be revoked. This command will block until all tokens are revoked.
+ If the command exits early, the tokens will still be revoked.
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/auth_disable_test.go b/vendor/github.com/hashicorp/vault/command/auth_disable_test.go
new file mode 100644
index 0000000..fb2b91f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/auth_disable_test.go
@@ -0,0 +1,102 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestAuthDisable(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &AuthDisableCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "noop",
+ }
+
+ // Run the command once to set up the client; it will fail
+ c.Run(args)
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := client.Sys().EnableAuth("noop", "noop", ""); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ mounts, err := client.Sys().ListAuth()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if _, ok := mounts["noop"]; ok {
+ t.Fatal("should not have noop mount")
+ }
+}
+
+func TestAuthDisableWithOptions(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &AuthDisableCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "noop",
+ }
+
+ // Run the command once to set up the client; it will fail
+ c.Run(args)
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := client.Sys().EnableAuthWithOptions("noop", &api.EnableAuthOptions{
+ Type: "noop",
+ Description: "",
+ }); err != nil {
+ t.Fatalf("err: %#v", err)
+ }
+
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ mounts, err := client.Sys().ListAuth()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if _, ok := mounts["noop"]; ok {
+ t.Fatal("should not have noop mount")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/auth_enable.go b/vendor/github.com/hashicorp/vault/command/auth_enable.go
new file mode 100644
index 0000000..81c7cce
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/auth_enable.go
@@ -0,0 +1,97 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/meta"
+)
+
+// AuthEnableCommand is a Command that enables a new endpoint.
+type AuthEnableCommand struct {
+ meta.Meta
+}
+
+func (c *AuthEnableCommand) Run(args []string) int {
+ var description, path string
+ var local bool
+ flags := c.Meta.FlagSet("auth-enable", meta.FlagSetDefault)
+ flags.StringVar(&description, "description", "", "")
+ flags.StringVar(&path, "path", "", "")
+ flags.BoolVar(&local, "local", false, "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 1 {
+ flags.Usage()
+ c.Ui.Error(fmt.Sprintf(
+ "\nauth-enable expects one argument: the type to enable."))
+ return 1
+ }
+
+ authType := args[0]
+
+ // If no path is specified, we default the path to the backend type
+ if path == "" {
+ path = authType
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ if err := client.Sys().EnableAuthWithOptions(path, &api.EnableAuthOptions{
+ Type: authType,
+ Description: description,
+ Local: local,
+ }); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error: %s", err))
+ return 2
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "Successfully enabled '%s' at '%s'!",
+ authType, path))
+
+ return 0
+}
+
+func (c *AuthEnableCommand) Synopsis() string {
+ return "Enable a new auth provider"
+}
+
+func (c *AuthEnableCommand) Help() string {
+ helpText := `
+Usage: vault auth-enable [options] type
+
+ Enable a new auth provider.
+
+ This command enables a new auth provider. An auth provider is responsible
+ for authenticating a user and assigning them policies with which they can
+ access Vault.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Auth Enable Options:
+
+ -description= Human-friendly description of the purpose of the
+ auth provider. This shows up in the auth -methods command.
+
+ -path= Mount point for the auth provider. This defaults
+ to the type of the mount. This will make the auth
+ provider available at "/auth/"
+
+ -local Mark the mount as a local mount. Local mounts
+ are not replicated nor (if a secondary)
+ removed by replication.
+`
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/auth_enable_test.go b/vendor/github.com/hashicorp/vault/command/auth_enable_test.go
new file mode 100644
index 0000000..0f83487
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/auth_enable_test.go
@@ -0,0 +1,50 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestAuthEnable(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &AuthEnableCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "noop",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ mounts, err := client.Sys().ListAuth()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ mount, ok := mounts["noop/"]
+ if !ok {
+ t.Fatal("should have noop mount")
+ }
+ if mount.Type != "noop" {
+ t.Fatal("should be noop type")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/auth_test.go b/vendor/github.com/hashicorp/vault/command/auth_test.go
new file mode 100644
index 0000000..9ffd0ac
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/auth_test.go
@@ -0,0 +1,205 @@
+package command
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestAuth_methods(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ testAuthInit(t)
+
+ ui := new(cli.MockUi)
+ c := &AuthCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ TokenHelper: DefaultTokenHelper,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "-methods",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ output := ui.OutputWriter.String()
+ if !strings.Contains(output, "token") {
+ t.Fatalf("bad: %#v", output)
+ }
+}
+
+func TestAuth_token(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ testAuthInit(t)
+
+ ui := new(cli.MockUi)
+ c := &AuthCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ TokenHelper: DefaultTokenHelper,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ token,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ helper, err := c.TokenHelper()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ actual, err := helper.Get()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if actual != token {
+ t.Fatalf("bad: %s", actual)
+ }
+}
+
+func TestAuth_stdin(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ testAuthInit(t)
+
+ stdinR, stdinW := io.Pipe()
+ ui := new(cli.MockUi)
+ c := &AuthCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ TokenHelper: DefaultTokenHelper,
+ },
+ testStdin: stdinR,
+ }
+
+ go func() {
+ stdinW.Write([]byte(token))
+ stdinW.Close()
+ }()
+
+ args := []string{
+ "-address", addr,
+ "-",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestAuth_badToken(t *testing.T) {
+ core, _, _ := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ testAuthInit(t)
+
+ ui := new(cli.MockUi)
+ c := &AuthCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ TokenHelper: DefaultTokenHelper,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "not-a-valid-token",
+ }
+ if code := c.Run(args); code != 1 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestAuth_method(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ testAuthInit(t)
+
+ ui := new(cli.MockUi)
+ c := &AuthCommand{
+ Handlers: map[string]AuthHandler{
+ "test": &testAuthHandler{},
+ },
+ Meta: meta.Meta{
+ Ui: ui,
+ TokenHelper: DefaultTokenHelper,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "-method=test",
+ "foo=" + token,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ helper, err := c.TokenHelper()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ actual, err := helper.Get()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if actual != token {
+ t.Fatalf("bad: %s", actual)
+ }
+}
+
+func testAuthInit(t *testing.T) {
+ td, err := ioutil.TempDir("", "vault")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Set the HOME env var so we get that right
+ os.Setenv("HOME", td)
+
+ // Write a .vault config that explicitly selects the default token helper
+ config := "token_helper = \"\"\n"
+ ioutil.WriteFile(filepath.Join(td, ".vault"), []byte(config), 0644)
+}
+
+type testAuthHandler struct{}
+
+func (h *testAuthHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+ return m["foo"], nil
+}
+
+func (h *testAuthHandler) Help() string { return "" }
diff --git a/vendor/github.com/hashicorp/vault/command/capabilities.go b/vendor/github.com/hashicorp/vault/command/capabilities.go
new file mode 100644
index 0000000..bb60bd4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/capabilities.go
@@ -0,0 +1,87 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// CapabilitiesCommand is a Command that enables a new endpoint.
+type CapabilitiesCommand struct {
+ meta.Meta
+}
+
+func (c *CapabilitiesCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("capabilities", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) > 2 {
+ flags.Usage()
+ c.Ui.Error(fmt.Sprintf(
+ "\ncapabilities expects at most two arguments"))
+ return 1
+ }
+
+ var token string
+ var path string
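+ // With one argument it is the path and the client token is used; with
+ // two, the first argument is the token to check.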
+ switch {
+ case len(args) == 1:
+ path = args[0]
+ case len(args) == 2:
+ token = args[0]
+ path = args[1]
+ default:
+ flags.Usage()
+ c.Ui.Error(fmt.Sprintf("\ncapabilities expects at least one argument"))
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ var capabilities []string
+ if token == "" {
+ capabilities, err = client.Sys().CapabilitiesSelf(path)
+ } else {
+ capabilities, err = client.Sys().Capabilities(token, path)
+ }
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error retrieving capabilities: %s", err))
+ return 1
+ }
+
+ c.Ui.Output(fmt.Sprintf("Capabilities: %s", capabilities))
+ return 0
+}
+
+func (c *CapabilitiesCommand) Synopsis() string {
+ return "Fetch the capabilities of a token on a given path"
+}
+
+func (c *CapabilitiesCommand) Help() string {
+ helpText := `
+Usage: vault capabilities [options] [token] path
+
+ Fetch the capabilities of a token on a given path.
+ If a token is provided as an argument, the '/sys/capabilities' endpoint will be invoked
+ with the given token; otherwise the '/sys/capabilities-self' endpoint will be invoked
+ with the client token.
+
+ If a token does not have any capability on a given path, or if any of the policies
+ belonging to the token explicitly have ["deny"] capability, or if the argument path
+ is invalid, this command will respond with a ["deny"].
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/capabilities_test.go b/vendor/github.com/hashicorp/vault/command/capabilities_test.go
new file mode 100644
index 0000000..5d106a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/capabilities_test.go
@@ -0,0 +1,45 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestCapabilities_Basic(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+ ui := new(cli.MockUi)
+ c := &CapabilitiesCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ var args []string
+
+ args = []string{"-address", addr}
+ if code := c.Run(args); code == 0 {
+ t.Fatalf("expected failure due to no args")
+ }
+
+ args = []string{"-address", addr, "testpath"}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ args = []string{"-address", addr, token, "test"}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ args = []string{"-address", addr, "invalidtoken", "test"}
+ if code := c.Run(args); code == 0 {
+ t.Fatalf("expected failure due to invalid token")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/command_test.go b/vendor/github.com/hashicorp/vault/command/command_test.go
new file mode 100644
index 0000000..763587a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/command_test.go
@@ -0,0 +1,19 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+)
+
+func testClient(t *testing.T, addr string, token string) *api.Client {
+ config := api.DefaultConfig()
+ config.Address = addr
+ client, err := api.NewClient(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ client.SetToken(token)
+
+ return client
+}
diff --git a/vendor/github.com/hashicorp/vault/command/config.go b/vendor/github.com/hashicorp/vault/command/config.go
new file mode 100644
index 0000000..ee866fa
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/config.go
@@ -0,0 +1,123 @@
+package command
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/mitchellh/go-homedir"
+)
+
+const (
+ // DefaultConfigPath is the default path to the configuration file
+ DefaultConfigPath = "~/.vault"
+
+ // ConfigPathEnv is the environment variable that can be used to
+ // override where the Vault configuration is.
+ ConfigPathEnv = "VAULT_CONFIG_PATH"
+)
+
+// DefaultConfig is the CLI configuration for Vault that can be specified
+// via a `$HOME/.vault` file written in HCL (which also accepts JSON).
+type DefaultConfig struct {
+ // TokenHelper is the executable/command that is executed for storing
+ // and retrieving the authentication token for the Vault CLI. If this
+ // is not specified, then vault's internal token store will be used, which
+ // stores the token on disk unencrypted.
+ TokenHelper string `hcl:"token_helper"`
+}
+
+// Config loads the configuration from the default path and returns it.
+func Config() (*DefaultConfig, error) {
+ var err error
+ config, err := LoadConfig("")
+ if err != nil {
+ return nil, err
+ }
+
+ return config, nil
+}
+
+// LoadConfig reads the configuration from the given path. If path is
+// empty, the default path is used; in either case the ConfigPathEnv
+// environment variable, when set, takes precedence.
+func LoadConfig(path string) (*DefaultConfig, error) {
+ if path == "" {
+ path = DefaultConfigPath
+ }
+ if v := os.Getenv(ConfigPathEnv); v != "" {
+ path = v
+ }
+
+ // NOTE: requires HOME env var to be set
+ path, err := homedir.Expand(path)
+ if err != nil {
+ return nil, fmt.Errorf("Error expanding config path %s: %s", path, err)
+ }
+
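+ // A missing config file is not treated as an error: the NotExist case
+ // falls through, and parsing the empty contents yields a zero-value config.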
+ contents, err := ioutil.ReadFile(path)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+
+ return ParseConfig(string(contents))
+}
+
+// ParseConfig parses the given configuration as a string.
+func ParseConfig(contents string) (*DefaultConfig, error) {
+ root, err := hcl.Parse(contents)
+ if err != nil {
+ return nil, err
+ }
+
+ // Top-level item should be the object list
+ list, ok := root.Node.(*ast.ObjectList)
+ if !ok {
+ return nil, fmt.Errorf("Failed to parse config: does not contain a root object")
+ }
+
+ valid := []string{
+ "token_helper",
+ }
+ if err := checkHCLKeys(list, valid); err != nil {
+ return nil, err
+ }
+
+ var c DefaultConfig
+ if err := hcl.DecodeObject(&c, list); err != nil {
+ return nil, err
+ }
+ return &c, nil
+}
+
+func checkHCLKeys(node ast.Node, valid []string) error {
+ var list *ast.ObjectList
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ list = n
+ case *ast.ObjectType:
+ list = n.List
+ default:
+ return fmt.Errorf("cannot check HCL keys of type %T", n)
+ }
+
+ validMap := make(map[string]struct{}, len(valid))
+ for _, v := range valid {
+ validMap[v] = struct{}{}
+ }
+
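+ // Accumulate every invalid key with multierror instead of failing fast,
+ // so the user sees all problems in a single pass.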
+ var result error
+ for _, item := range list.Items {
+ key := item.Keys[0].Token.Value().(string)
+ if _, ok := validMap[key]; !ok {
+ result = multierror.Append(result, fmt.Errorf(
+ "invalid key '%s' on line %d", key, item.Assign.Line))
+ }
+ }
+
+ return result
+}
diff --git a/vendor/github.com/hashicorp/vault/command/config_test.go b/vendor/github.com/hashicorp/vault/command/config_test.go
new file mode 100644
index 0000000..edbf557
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/config_test.go
@@ -0,0 +1,49 @@
+package command
+
+import (
+ "path/filepath"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+const FixturePath = "./test-fixtures"
+
+func TestLoadConfig(t *testing.T) {
+ config, err := LoadConfig(filepath.Join(FixturePath, "config.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := &DefaultConfig{
+ TokenHelper: "foo",
+ }
+ if !reflect.DeepEqual(expected, config) {
+ t.Fatalf("bad: %#v", config)
+ }
+}
+
+func TestLoadConfig_noExist(t *testing.T) {
+ config, err := LoadConfig("nope/not-once/.never")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if config.TokenHelper != "" {
+ t.Errorf("expected %q to be %q", config.TokenHelper, "")
+ }
+}
+
+func TestParseConfig_badKeys(t *testing.T) {
+ _, err := ParseConfig(`
+token_helper = "/token"
+nope = "true"
+`)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ if !strings.Contains(err.Error(), "invalid key 'nope' on line 3") {
+ t.Errorf("bad error: %s", err.Error())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/delete.go b/vendor/github.com/hashicorp/vault/command/delete.go
new file mode 100644
index 0000000..d9a8ee8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/delete.go
@@ -0,0 +1,67 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// DeleteCommand is a Command that deletes data from the Vault.
+type DeleteCommand struct {
+ meta.Meta
+}
+
+func (c *DeleteCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("delete", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 1 {
+ c.Ui.Error("delete expects one argument")
+ flags.Usage()
+ return 1
+ }
+
+ path := args[0]
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ if _, err := client.Logical().Delete(path); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error deleting '%s': %s", path, err))
+ return 1
+ }
+
+ c.Ui.Output(fmt.Sprintf("Success! Deleted '%s' if it existed.", path))
+ return 0
+}
+
+func (c *DeleteCommand) Synopsis() string {
+ return "Delete operation on secrets in Vault"
+}
+
+func (c *DeleteCommand) Help() string {
+ helpText := `
+Usage: vault delete [options] path
+
+ Delete data (secrets or configuration) from Vault.
+
+ Delete sends a delete operation request to the given path. The
+ behavior of the delete is determined by the backend at the given
+ path. For example, deleting "aws/policy/ops" will delete the "ops"
+ policy for the AWS backend. Use "vault help" for more details on
+ whether delete is supported for a path and what the behavior is.
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/delete_test.go b/vendor/github.com/hashicorp/vault/command/delete_test.go
new file mode 100644
index 0000000..c5efc41
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/delete_test.go
@@ -0,0 +1,56 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestDelete(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &DeleteCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "secret/foo",
+ }
+
+ // Run once so the client is setup, ignore errors
+ c.Run(args)
+
+ // Get the client so we can write data
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ data := map[string]interface{}{"value": "bar"}
+ if _, err := client.Logical().Write("secret/foo", data); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Run the delete
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ resp, err := client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/format.go b/vendor/github.com/hashicorp/vault/command/format.go
new file mode 100644
index 0000000..4520b20
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/format.go
@@ -0,0 +1,221 @@
+package command
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ghodss/yaml"
+ "github.com/hashicorp/vault/api"
+ "github.com/mitchellh/cli"
+ "github.com/ryanuber/columnize"
+)
+
+func OutputSecret(ui cli.Ui, format string, secret *api.Secret) int {
+ return outputWithFormat(ui, format, secret, secret)
+}
+
+func OutputList(ui cli.Ui, format string, secret *api.Secret) int {
+ return outputWithFormat(ui, format, secret, secret.Data["keys"])
+}
+
+func outputWithFormat(ui cli.Ui, format string, secret *api.Secret, data interface{}) int {
+ formatter, ok := Formatters[strings.ToLower(format)]
+ if !ok {
+ ui.Error(fmt.Sprintf("Invalid output format: %s", format))
+ return 1
+ }
+ if err := formatter.Output(ui, secret, data); err != nil {
+ ui.Error(fmt.Sprintf("Could not output secret: %s", err.Error()))
+ return 1
+ }
+ return 0
+}
+
+type Formatter interface {
+ Output(ui cli.Ui, secret *api.Secret, data interface{}) error
+}
+
+var Formatters = map[string]Formatter{
+ "json": JsonFormatter{},
+ "table": TableFormatter{},
+ "yaml": YamlFormatter{},
+ "yml": YamlFormatter{},
+}
+
+// An output formatter for JSON output of an object
+type JsonFormatter struct {
+}
+
+func (j JsonFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error {
+ b, err := json.Marshal(data)
+ if err == nil {
+ var out bytes.Buffer
+ json.Indent(&out, b, "", "\t")
+ ui.Output(out.String())
+ }
+ return err
+}
+
+// An output formatter for YAML output of an object
+type YamlFormatter struct {
+}
+
+func (y YamlFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error {
+ b, err := yaml.Marshal(data)
+ if err == nil {
+ ui.Output(strings.TrimSpace(string(b)))
+ }
+ return err
+}
+
+// An output formatter for table output of an object
+type TableFormatter struct {
+}
+
+func (t TableFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error {
+ // TODO: this should really use reflection like the other formatters do
+ if s, ok := data.(*api.Secret); ok {
+ return t.OutputSecret(ui, secret, s)
+ }
+ if s, ok := data.([]interface{}); ok {
+ return t.OutputList(ui, secret, s)
+ }
+ return errors.New("Cannot use the table formatter for this type")
+}
+
+func (t TableFormatter) OutputList(ui cli.Ui, secret *api.Secret, list []interface{}) error {
+ config := columnize.DefaultConfig()
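+ // The delimiter is a rune that should never appear in the data, so
+ // embedded spaces or tabs in values do not create extra columns.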
+ config.Delim = "♨"
+ config.Glue = "\t"
+ config.Prefix = ""
+
+ input := make([]string, 0, 5)
+
+ if len(list) > 0 {
+ input = append(input, "Keys")
+ input = append(input, "----")
+
+ keys := make([]string, 0, len(list))
+ for _, k := range list {
+ keys = append(keys, k.(string))
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ input = append(input, k)
+ }
+ }
+
+ tableOutputStr := columnize.Format(input, config)
+
+ // Print the warnings separately: the longest warning string would
+ // otherwise widen the first column and make the table look bad.
+ warningsInput := make([]string, 0, 5)
+ if len(secret.Warnings) != 0 {
+ warningsInput = append(warningsInput, "")
+ warningsInput = append(warningsInput, "The following warnings were returned from the Vault server:")
+ for _, warning := range secret.Warnings {
+ warningsInput = append(warningsInput, fmt.Sprintf("* %s", warning))
+ }
+ }
+
+ warningsOutputStr := columnize.Format(warningsInput, config)
+
+ ui.Output(fmt.Sprintf("%s\n%s", tableOutputStr, warningsOutputStr))
+
+ return nil
+}
+
+func (t TableFormatter) OutputSecret(ui cli.Ui, secret, s *api.Secret) error {
+ config := columnize.DefaultConfig()
+ config.Delim = "♨"
+ config.Glue = "\t"
+ config.Prefix = ""
+
+ input := make([]string, 0, 5)
+
+ onceHeader := &sync.Once{}
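+ // Emit the Key/Value header lazily, at most once, and only when one of
+ // the sections below actually adds rows.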
+ headerFunc := func() {
+ input = append(input, fmt.Sprintf("Key %s Value", config.Delim))
+ input = append(input, fmt.Sprintf("--- %s -----", config.Delim))
+ }
+
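+ // A lease ID marks an actual lease; without one the duration is
+ // surfaced as a refresh interval instead.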
+ if s.LeaseDuration > 0 {
+ onceHeader.Do(headerFunc)
+ if s.LeaseID != "" {
+ input = append(input, fmt.Sprintf("lease_id %s %s", config.Delim, s.LeaseID))
+ input = append(input, fmt.Sprintf(
+ "lease_duration %s %s", config.Delim, (time.Second*time.Duration(s.LeaseDuration)).String()))
+ } else {
+ input = append(input, fmt.Sprintf(
+ "refresh_interval %s %s", config.Delim, (time.Second*time.Duration(s.LeaseDuration)).String()))
+ }
+ if s.LeaseID != "" {
+ input = append(input, fmt.Sprintf(
+ "lease_renewable %s %s", config.Delim, strconv.FormatBool(s.Renewable)))
+ }
+ }
+
+ if s.Auth != nil {
+ onceHeader.Do(headerFunc)
+ input = append(input, fmt.Sprintf("token %s %s", config.Delim, s.Auth.ClientToken))
+ input = append(input, fmt.Sprintf("token_accessor %s %s", config.Delim, s.Auth.Accessor))
+ input = append(input, fmt.Sprintf("token_duration %s %s", config.Delim, (time.Second*time.Duration(s.Auth.LeaseDuration)).String()))
+ input = append(input, fmt.Sprintf("token_renewable %s %v", config.Delim, s.Auth.Renewable))
+ input = append(input, fmt.Sprintf("token_policies %s %v", config.Delim, s.Auth.Policies))
+ for k, v := range s.Auth.Metadata {
+ input = append(input, fmt.Sprintf("token_meta_%s %s %#v", k, config.Delim, v))
+ }
+ }
+
+ if s.WrapInfo != nil {
+ onceHeader.Do(headerFunc)
+ input = append(input, fmt.Sprintf("wrapping_token: %s %s", config.Delim, s.WrapInfo.Token))
+ input = append(input, fmt.Sprintf("wrapping_token_ttl: %s %s", config.Delim, (time.Second*time.Duration(s.WrapInfo.TTL)).String()))
+ input = append(input, fmt.Sprintf("wrapping_token_creation_time: %s %s", config.Delim, s.WrapInfo.CreationTime.String()))
+ if s.WrapInfo.WrappedAccessor != "" {
+ input = append(input, fmt.Sprintf("wrapped_accessor: %s %s", config.Delim, s.WrapInfo.WrappedAccessor))
+ }
+ }
+
+ if len(s.Data) > 0 {
+ onceHeader.Do(headerFunc)
+ keys := make([]string, 0, len(s.Data))
+ for k := range s.Data {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ input = append(input, fmt.Sprintf("%s %s %v", k, config.Delim, s.Data[k]))
+ }
+ }
+
+ tableOutputStr := columnize.Format(input, config)
+
+ // Print the warnings separately: the longest warning string would
+ // otherwise widen the first column and make the table look bad.
+ warningsInput := make([]string, 0, 5)
+ if len(s.Warnings) != 0 {
+ warningsInput = append(warningsInput, "")
+ warningsInput = append(warningsInput, "The following warnings were returned from the Vault server:")
+ for _, warning := range s.Warnings {
+ warningsInput = append(warningsInput, fmt.Sprintf("* %s", warning))
+ }
+ }
+
+ warningsOutputStr := columnize.Format(warningsInput, config)
+
+ ui.Output(fmt.Sprintf("%s\n%s", tableOutputStr, warningsOutputStr))
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/command/format_test.go b/vendor/github.com/hashicorp/vault/command/format_test.go
new file mode 100644
index 0000000..8e32d24
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/format_test.go
@@ -0,0 +1,82 @@
+package command
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/ghodss/yaml"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/jsonutil"
+)
+
+var output string
+
+type mockUi struct {
+ t *testing.T
+ SampleData string
+}
+
+func (m mockUi) Ask(_ string) (string, error) {
+ m.t.FailNow()
+ return "", nil
+}
+func (m mockUi) AskSecret(_ string) (string, error) {
+ m.t.FailNow()
+ return "", nil
+}
+func (m mockUi) Output(s string) {
+ output = s
+}
+func (m mockUi) Info(s string) {
+ m.t.Log(s)
+}
+func (m mockUi) Error(s string) {
+ m.t.Log(s)
+}
+func (m mockUi) Warn(s string) {
+ m.t.Log(s)
+}
+
+func TestJsonFormatter(t *testing.T) {
+ ui := mockUi{t: t, SampleData: "something"}
+ if err := outputWithFormat(ui, "json", nil, ui); err != 0 {
+ t.Fatal(err)
+ }
+ var newUi mockUi
+ if err := jsonutil.DecodeJSON([]byte(output), &newUi); err != nil {
+ t.Fatal(err)
+ }
+ if newUi.SampleData != ui.SampleData {
+ t.Fatalf(`values not equal ("%s" != "%s")`,
+ newUi.SampleData,
+ ui.SampleData)
+ }
+}
+
+func TestYamlFormatter(t *testing.T) {
+ ui := mockUi{t: t, SampleData: "something"}
+ if err := outputWithFormat(ui, "yaml", nil, ui); err != 0 {
+ t.Fatal(err)
+ }
+ var newUi mockUi
+ err := yaml.Unmarshal([]byte(output), &newUi)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if newUi.SampleData != ui.SampleData {
+ t.Fatalf(`values not equal ("%s" != "%s")`,
+ newUi.SampleData,
+ ui.SampleData)
+ }
+}
+
+func TestTableFormatter(t *testing.T) {
+ ui := mockUi{t: t}
+ s := api.Secret{Data: map[string]interface{}{"k": "something"}}
+ if err := outputWithFormat(ui, "table", &s, &s); err != 0 {
+ t.Fatal(err)
+ }
+ if !strings.Contains(output, "something") {
+ t.Fatal("did not find 'something'")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/generate-root.go b/vendor/github.com/hashicorp/vault/command/generate-root.go
new file mode 100644
index 0000000..f013294
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/generate-root.go
@@ -0,0 +1,354 @@
+package command
+
+import (
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/password"
+ "github.com/hashicorp/vault/helper/pgpkeys"
+ "github.com/hashicorp/vault/helper/xor"
+ "github.com/hashicorp/vault/meta"
+)
+
+// GenerateRootCommand is a Command that generates a new root token.
+type GenerateRootCommand struct {
+ meta.Meta
+
+ // Key can be used to pre-seed the key. If it is set, the user will not
+ // be prompted for it via the `password` helper.
+ Key string
+
+ // Nonce is the nonce to send along with the generate-root request
+ Nonce string
+}
+
+func (c *GenerateRootCommand) Run(args []string) int {
+ var init, cancel, status, genotp bool
+ var nonce, decode, otp, pgpKey string
+ var pgpKeyArr pgpkeys.PubKeyFilesFlag
+ flags := c.Meta.FlagSet("generate-root", meta.FlagSetDefault)
+ flags.BoolVar(&init, "init", false, "")
+ flags.BoolVar(&cancel, "cancel", false, "")
+ flags.BoolVar(&status, "status", false, "")
+ flags.BoolVar(&genotp, "genotp", false, "")
+ flags.StringVar(&decode, "decode", "", "")
+ flags.StringVar(&otp, "otp", "", "")
+ flags.StringVar(&nonce, "nonce", "", "")
+ flags.Var(&pgpKeyArr, "pgp-key", "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ if genotp {
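+ // The OTP is 16 random bytes, base64-encoded; verifyOTP later requires
+ // exactly this length for '-init' and '-decode'.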
+ buf := make([]byte, 16)
+ readLen, err := rand.Read(buf)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error reading random bytes: %s", err))
+ return 1
+ }
+ if readLen != 16 {
+ c.Ui.Error(fmt.Sprintf("Read %d bytes when we should have read 16", readLen))
+ return 1
+ }
+ c.Ui.Output(fmt.Sprintf("OTP: %s", base64.StdEncoding.EncodeToString(buf)))
+ return 0
+ }
+
+ if len(decode) > 0 {
+ if len(otp) == 0 {
+ c.Ui.Error("Both the value to decode and the OTP must be passed in")
+ return 1
+ }
+ return c.decode(decode, otp)
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ // Check if the root generation is started
+ rootGenerationStatus, err := client.Sys().GenerateRootStatus()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error reading root generation status: %s", err))
+ return 1
+ }
+
+ // If we are initializing, or if generation has not started and we are
+ // not running a special function, check the OTP and PGP key
+ checkOtpPgp := false
+ switch {
+ case init:
+ checkOtpPgp = true
+ case cancel:
+ case status:
+ case genotp:
+ case len(decode) != 0:
+ case rootGenerationStatus.Started:
+ default:
+ checkOtpPgp = true
+ }
+ if checkOtpPgp {
+ switch {
+ case len(otp) == 0 && (pgpKeyArr == nil || len(pgpKeyArr) == 0):
+ c.Ui.Error(c.Help())
+ return 1
+ case len(otp) != 0 && pgpKeyArr != nil && len(pgpKeyArr) != 0:
+ c.Ui.Error(c.Help())
+ return 1
+ case len(otp) != 0:
+ err := c.verifyOTP(otp)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error verifying the provided OTP: %s", err))
+ return 1
+ }
+ case pgpKeyArr != nil:
+ if len(pgpKeyArr) != 1 {
+ c.Ui.Error("Could not parse PGP key")
+ return 1
+ }
+ if len(pgpKeyArr[0]) == 0 {
+ c.Ui.Error("Got an empty PGP key")
+ return 1
+ }
+ pgpKey = pgpKeyArr[0]
+ default:
+ panic("unreachable case")
+ }
+ }
+
+ if nonce != "" {
+ c.Nonce = nonce
+ }
+
+ // Check if we are running any of the restricted variants
+ switch {
+ case init:
+ return c.initGenerateRoot(client, otp, pgpKey)
+ case cancel:
+ return c.cancelGenerateRoot(client)
+ case status:
+ return c.rootGenerationStatus(client)
+ }
+
+ // Start the root generation process if not started
+ if !rootGenerationStatus.Started {
+ rootGenerationStatus, err = client.Sys().GenerateRootInit(otp, pgpKey)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error initializing root generation: %s", err))
+ return 1
+ }
+ c.Nonce = rootGenerationStatus.Nonce
+ }
+
+ serverNonce := rootGenerationStatus.Nonce
+
+ // Get the unseal key
+ args = flags.Args()
+ key := c.Key
+ if len(args) > 0 {
+ key = args[0]
+ }
+ if key == "" {
+ c.Nonce = serverNonce
+ fmt.Printf("Root generation operation nonce: %s\n", serverNonce)
+ fmt.Printf("Key (will be hidden): ")
+ key, err = password.Read(os.Stdin)
+ fmt.Printf("\n")
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error attempting to ask for password. The raw error message\n"+
+ "is shown below, but the most common reason for this error is\n"+
+ "that you attempted to pipe a value into unseal or you're\n"+
+ "executing `vault generate-root` from outside of a terminal.\n\n"+
+ "You should use `vault generate-root` from a terminal for maximum\n"+
+ "security. If this isn't an option, the unseal key can be passed\n"+
+ "in using the first parameter.\n\n"+
+ "Raw error: %s", err))
+ return 1
+ }
+ }
+
+ // Provide the key, this may potentially complete the update
+ statusResp, err := client.Sys().GenerateRootUpdate(strings.TrimSpace(key), c.Nonce)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error attempting generate-root update: %s", err))
+ return 1
+ }
+
+ c.dumpStatus(statusResp)
+
+ return 0
+}
+
+func (c *GenerateRootCommand) verifyOTP(otp string) error {
+ if len(otp) == 0 {
+ return fmt.Errorf("No OTP passed in")
+ }
+ otpBytes, err := base64.StdEncoding.DecodeString(otp)
+ if err != nil {
+ return fmt.Errorf("Error decoding base64 OTP value: %s", err)
+ }
+ if len(otpBytes) != 16 {
+ return fmt.Errorf("Decoded OTP value is invalid or wrong length")
+ }
+
+ return nil
+}
+
+func (c *GenerateRootCommand) decode(encodedVal, otp string) int {
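+ // The encoded value is the new root token XOR'd with the OTP; undoing
+ // the XOR and formatting the bytes as a UUID recovers the token.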
+ tokenBytes, err := xor.XORBase64(encodedVal, otp)
+ if err != nil {
+ c.Ui.Error(err.Error())
+ return 1
+ }
+
+ token, err := uuid.FormatUUID(tokenBytes)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error formatting base64 token value: %v", err))
+ return 1
+ }
+
+ c.Ui.Output(fmt.Sprintf("Root token: %s", token))
+
+ return 0
+}
+
+// initGenerateRoot is used to start the generation process
+func (c *GenerateRootCommand) initGenerateRoot(client *api.Client, otp string, pgpKey string) int {
+ // Start the root generation
+ status, err := client.Sys().GenerateRootInit(otp, pgpKey)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error initializing root generation: %s", err))
+ return 1
+ }
+
+ c.dumpStatus(status)
+
+ return 0
+}
+
+// cancelGenerateRoot is used to abort the generation process
+func (c *GenerateRootCommand) cancelGenerateRoot(client *api.Client) int {
+ err := client.Sys().GenerateRootCancel()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Failed to cancel root generation: %s", err))
+ return 1
+ }
+ c.Ui.Output("Root generation canceled.")
+ return 0
+}
+
+// rootGenerationStatus is used just to fetch and dump the status
+func (c *GenerateRootCommand) rootGenerationStatus(client *api.Client) int {
+ // Check the status
+ status, err := client.Sys().GenerateRootStatus()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error reading root generation status: %s", err))
+ return 1
+ }
+
+ c.dumpStatus(status)
+
+ return 0
+}
+
+// dumpStatus dumps the status to output
+func (c *GenerateRootCommand) dumpStatus(status *api.GenerateRootStatusResponse) {
+ // Dump the status
+ statString := fmt.Sprintf(
+ "Nonce: %s\n"+
+ "Started: %v\n"+
+ "Generate Root Progress: %d\n"+
+ "Required Keys: %d\n"+
+ "Complete: %t",
+ status.Nonce,
+ status.Started,
+ status.Progress,
+ status.Required,
+ status.Complete,
+ )
+ if len(status.PGPFingerprint) > 0 {
+ statString = fmt.Sprintf("%s\nPGP Fingerprint: %s", statString, status.PGPFingerprint)
+ }
+ if len(status.EncodedRootToken) > 0 {
+ statString = fmt.Sprintf("%s\n\nEncoded root token: %s", statString, status.EncodedRootToken)
+ }
+ c.Ui.Output(statString)
+}
+
+func (c *GenerateRootCommand) Synopsis() string {
+ return "Generates a new root token"
+}
+
+func (c *GenerateRootCommand) Help() string {
+ helpText := `
+Usage: vault generate-root [options] [key]
+
+ 'generate-root' is used to create a new root token.
+
+ Root generation can only be done when the vault is already unsealed. The
+ operation is done online, but requires that a threshold of the current unseal
+ keys be provided.
+
+ One (and only one) of the following must be provided when initializing the
+ root generation attempt:
+
+ 1) A 16-byte, base64-encoded One Time Password (OTP) provided in the '-otp'
+ flag; the token is XOR'd with this value before it is returned once the final
+ unseal key has been provided. The '-decode' operation can be used with this
+ value and the OTP to output the final token value. The '-genotp' flag can be
+ used to generate a suitable value.
+
+ or
+
+ 2) A file containing a PGP key (binary or base64-encoded) or a Keybase.io
+ username in the format of "keybase:" in the '-pgp-key' flag. The
+ final token value will be encrypted with this public key and base64-encoded.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Generate Root Options:
+
+ -init Initialize the root generation attempt. This can only
+ be done if no generation is already initiated.
+
+ -cancel Reset the root generation process by throwing away
+ prior unseal keys and the configuration.
+
+ -status Prints the status of the current attempt. This can be
+ used to see the status without attempting to provide
+ an unseal key.
+
+ -decode=abcd Decodes and outputs the generated root token. The OTP
+ used at '-init' time must be provided in the '-otp'
+ parameter.
+
+ -genotp Returns a high-quality OTP suitable for passing into
+ the '-init' method.
+
+ -otp=abcd The base64-encoded 16-byte OTP for use with the
+ '-init' or '-decode' methods.
+
+ -pgp-key A file on disk containing a binary- or base64-format
+ public PGP key, or a Keybase username specified as
+ "keybase:". The output root token will be
+ encrypted and base64-encoded, in order, with the given
+ public key.
+
+ -nonce=abcd The nonce provided at initialization time. This same
+ nonce value must be provided with each unseal key. If
+ the unseal key is not being passed in via the command
+ line the nonce parameter is not required, and will
+ instead be displayed with the key prompt.
+`
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/generate-root_test.go b/vendor/github.com/hashicorp/vault/command/generate-root_test.go
new file mode 100644
index 0000000..847400d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/generate-root_test.go
@@ -0,0 +1,294 @@
+package command
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/pgpkeys"
+ "github.com/hashicorp/vault/helper/xor"
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestGenerateRoot_Cancel(t *testing.T) {
+ core, _, _ := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &GenerateRootCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ otpBytes, err := vault.GenerateRandBytes(16)
+ if err != nil {
+ t.Fatal(err)
+ }
+ otp := base64.StdEncoding.EncodeToString(otpBytes)
+
+ args := []string{"-address", addr, "-init", "-otp", otp}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ args = []string{"-address", addr, "-cancel"}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ config, err := core.GenerateRootConfiguration()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if config != nil {
+ t.Fatal("should not have a config for root generation")
+ }
+}
+
+func TestGenerateRoot_status(t *testing.T) {
+ core, _, _ := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &GenerateRootCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ otpBytes, err := vault.GenerateRandBytes(16)
+ if err != nil {
+ t.Fatal(err)
+ }
+ otp := base64.StdEncoding.EncodeToString(otpBytes)
+
+ args := []string{"-address", addr, "-init", "-otp", otp}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ args = []string{"-address", addr, "-status"}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ if !strings.Contains(ui.OutputWriter.String(), "Started: true") {
+ t.Fatalf("bad: %s", ui.OutputWriter.String())
+ }
+}
+
+func TestGenerateRoot_OTP(t *testing.T) {
+ core, ts, keys, _ := vault.TestCoreWithTokenStore(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &GenerateRootCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ // Generate an OTP
+ otpBytes, err := vault.GenerateRandBytes(16)
+ if err != nil {
+ t.Fatal(err)
+ }
+ otp := base64.StdEncoding.EncodeToString(otpBytes)
+
+ // Init the attempt
+ args := []string{
+ "-address", addr,
+ "-init",
+ "-otp", otp,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ config, err := core.GenerateRootConfiguration()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ for _, key := range keys {
+ ui = new(cli.MockUi)
+ c = &GenerateRootCommand{
+ Key: hex.EncodeToString(key),
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ c.Nonce = config.Nonce
+
+ // Provide the key
+ args = []string{
+ "-address", addr,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+ }
+
+ beforeNAfter := strings.Split(ui.OutputWriter.String(), "Encoded root token: ")
+ if len(beforeNAfter) != 2 {
+ t.Fatalf("did not find encoded root token in %s", ui.OutputWriter.String())
+ }
+ encodedToken := strings.TrimSpace(beforeNAfter[1])
+
+ decodedToken, err := xor.XORBase64(encodedToken, otp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ token, err := uuid.FormatUUID(decodedToken)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := logical.TestRequest(t, logical.ReadOperation, "lookup-self")
+ req.ClientToken = token
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("error running token lookup-self: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("got nil resp with token lookup-self")
+ }
+ if resp.Data == nil {
+ t.Fatalf("got nil resp.Data with token lookup-self")
+ }
+
+ if resp.Data["orphan"].(bool) != true ||
+ resp.Data["ttl"].(int64) != 0 ||
+ resp.Data["num_uses"].(int) != 0 ||
+ resp.Data["meta"].(map[string]string) != nil ||
+ len(resp.Data["policies"].([]string)) != 1 ||
+ resp.Data["policies"].([]string)[0] != "root" {
+ t.Fatalf("bad: %#v", resp.Data)
+ }
+
+ // Clear the output and run a decode to verify we get the same result
+ ui.OutputWriter.Reset()
+ args = []string{
+ "-address", addr,
+ "-decode", encodedToken,
+ "-otp", otp,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+ beforeNAfter = strings.Split(ui.OutputWriter.String(), "Root token: ")
+ if len(beforeNAfter) != 2 {
+ t.Fatalf("did not find decoded root token in %s", ui.OutputWriter.String())
+ }
+
+ outToken := strings.TrimSpace(beforeNAfter[1])
+ if outToken != token {
+ t.Fatalf("tokens do not match:\n%s\n%s", token, outToken)
+ }
+}
+
+func TestGenerateRoot_PGP(t *testing.T) {
+ core, ts, keys, _ := vault.TestCoreWithTokenStore(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &GenerateRootCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ tempDir, pubFiles, err := getPubKeyFiles(t)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ // Init the attempt
+ args := []string{
+ "-address", addr,
+ "-init",
+ "-pgp-key", pubFiles[0],
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ config, err := core.GenerateRootConfiguration()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ for _, key := range keys {
+ c = &GenerateRootCommand{
+ Key: hex.EncodeToString(key),
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ c.Nonce = config.Nonce
+
+ // Provide the key
+ args = []string{
+ "-address", addr,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+ }
+
+ beforeNAfter := strings.Split(ui.OutputWriter.String(), "Encoded root token: ")
+ if len(beforeNAfter) != 2 {
+ t.Fatalf("did not find encoded root token in %s", ui.OutputWriter.String())
+ }
+ encodedToken := strings.TrimSpace(beforeNAfter[1])
+
+ ptBuf, err := pgpkeys.DecryptBytes(encodedToken, pgpkeys.TestPrivKey1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ptBuf == nil {
+ t.Fatal("returned plain text buffer is nil")
+ }
+
+ token := ptBuf.String()
+
+ req := logical.TestRequest(t, logical.ReadOperation, "lookup-self")
+ req.ClientToken = token
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("error running token lookup-self: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("got nil resp with token lookup-self")
+ }
+ if resp.Data == nil {
+ t.Fatalf("got nil resp.Data with token lookup-self")
+ }
+
+ if resp.Data["orphan"].(bool) != true ||
+ resp.Data["ttl"].(int64) != 0 ||
+ resp.Data["num_uses"].(int) != 0 ||
+ resp.Data["meta"].(map[string]string) != nil ||
+ len(resp.Data["policies"].([]string)) != 1 ||
+ resp.Data["policies"].([]string)[0] != "root" {
+ t.Fatalf("bad: %#v", resp.Data)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/init.go b/vendor/github.com/hashicorp/vault/command/init.go
new file mode 100644
index 0000000..4c638dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/init.go
@@ -0,0 +1,386 @@
+package command
+
+import (
+ "fmt"
+ "net/url"
+ "os"
+ "runtime"
+ "strings"
+
+ consulapi "github.com/hashicorp/consul/api"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/pgpkeys"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/physical"
+)
+
+// InitCommand is a Command that initializes a new Vault server.
+type InitCommand struct {
+ meta.Meta
+}
+
+func (c *InitCommand) Run(args []string) int {
+ var threshold, shares, storedShares, recoveryThreshold, recoveryShares int
+ var pgpKeys, recoveryPgpKeys, rootTokenPgpKey pgpkeys.PubKeyFilesFlag
+ var auto, check bool
+ var consulServiceName string
+ flags := c.Meta.FlagSet("init", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ flags.IntVar(&shares, "key-shares", 5, "")
+ flags.IntVar(&threshold, "key-threshold", 3, "")
+ flags.IntVar(&storedShares, "stored-shares", 0, "")
+ flags.Var(&pgpKeys, "pgp-keys", "")
+ flags.Var(&rootTokenPgpKey, "root-token-pgp-key", "")
+ flags.IntVar(&recoveryShares, "recovery-shares", 5, "")
+ flags.IntVar(&recoveryThreshold, "recovery-threshold", 3, "")
+ flags.Var(&recoveryPgpKeys, "recovery-pgp-keys", "")
+ flags.BoolVar(&check, "check", false, "")
+ flags.BoolVar(&auto, "auto", false, "")
+ flags.StringVar(&consulServiceName, "consul-service", physical.DefaultServiceName, "")
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ initRequest := &api.InitRequest{
+ SecretShares: shares,
+ SecretThreshold: threshold,
+ StoredShares: storedShares,
+ PGPKeys: pgpKeys,
+ RecoveryShares: recoveryShares,
+ RecoveryThreshold: recoveryThreshold,
+ RecoveryPGPKeys: recoveryPgpKeys,
+ }
+
+ switch len(rootTokenPgpKey) {
+ case 0:
+ case 1:
+ initRequest.RootTokenPGPKey = rootTokenPgpKey[0]
+ default:
+ c.Ui.Error("Only one PGP key can be specified for encrypting the root token")
+ return 1
+ }
+
+ // If running in 'auto' mode, run service discovery based on environment
+ // variables of Consul.
+ if auto {
+
+ // Create configuration for Consul
+ consulConfig := consulapi.DefaultConfig()
+
+ // Create a client to communicate with Consul
+ consulClient, err := consulapi.NewClient(consulConfig)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Failed to create Consul client:%v", err))
+ return 1
+ }
+
+ // Fetch Vault's protocol scheme from the client
+ vaultclient, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Failed to fetch Vault client: %v", err))
+ return 1
+ }
+
+ if vaultclient.Address() == "" {
+ c.Ui.Error("Failed to fetch Vault client address")
+ return 1
+ }
+
+ clientURL, err := url.Parse(vaultclient.Address())
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Failed to parse Vault address: %v", err))
+ return 1
+ }
+
+ if clientURL == nil {
+ c.Ui.Error("Failed to parse Vault client address")
+ return 1
+ }
+
+ var uninitializedVaults []string
+ var initializedVault string
+
+ // Query the nodes belonging to the cluster
+ if services, _, err := consulClient.Catalog().Service(consulServiceName, "", &consulapi.QueryOptions{AllowStale: true}); err == nil {
+ Loop:
+ for _, service := range services {
+ vaultAddress := &url.URL{
+ Scheme: clientURL.Scheme,
+ Host: fmt.Sprintf("%s:%d", service.ServiceAddress, service.ServicePort),
+ }
+
+ // Set VAULT_ADDR to the discovered node
+ os.Setenv(api.EnvVaultAddress, vaultAddress.String())
+
+ // Create a client to communicate with the discovered node
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error initializing client: %v", err))
+ return 1
+ }
+
+ // Check the initialization status of the discovered node
+ inited, err := client.Sys().InitStatus()
+ switch {
+ case err != nil:
+ c.Ui.Error(fmt.Sprintf("Error checking initialization status of discovered node: %+q. Err: %v", vaultAddress.String(), err))
+ return 1
+ case inited:
+ // One of the nodes in the cluster is initialized. Break out.
+ initializedVault = vaultAddress.String()
+ break Loop
+ default:
+ // Vault is uninitialized.
+ uninitializedVaults = append(uninitializedVaults, vaultAddress.String())
+ }
+ }
+ }
+
+ export := "export"
+ quote := "'"
+ if runtime.GOOS == "windows" {
+ export = "set"
+ quote = ""
+ }
+
+ if initializedVault != "" {
+ vaultURL, err := url.Parse(initializedVault)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Failed to parse Vault address: %+q. Err: %v", initializedVault, err))
+ }
+ c.Ui.Output(fmt.Sprintf("Discovered an initialized Vault node at %+q, using Consul service name %+q", vaultURL.String(), consulServiceName))
+ c.Ui.Output("\nSet the following environment variable to operate on the discovered Vault:\n")
+ c.Ui.Output(fmt.Sprintf("\t%s VAULT_ADDR=%s%s%s", export, quote, vaultURL.String(), quote))
+ return 0
+ }
+
+ switch len(uninitializedVaults) {
+ case 0:
+ c.Ui.Error(fmt.Sprintf("Failed to discover Vault nodes using Consul service name %+q", consulServiceName))
+ return 1
+ case 1:
+ // There was only one node found in the Vault cluster and it
+ // was uninitialized.
+
+ vaultURL, err := url.Parse(uninitializedVaults[0])
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Failed to parse Vault address: %+q. Err: %v", uninitializedVaults[0], err))
+ }
+
+ // Set the VAULT_ADDR to the discovered node. This will ensure
+ // that the client created will operate on the discovered node.
+ os.Setenv(api.EnvVaultAddress, vaultURL.String())
+
+ // Let the client know that initialization is performed on the
+ // discovered node.
+ c.Ui.Output(fmt.Sprintf("Discovered Vault at %+q using Consul service name %+q\n", vaultURL.String(), consulServiceName))
+
+ // Attempt initializing it
+ ret := c.runInit(check, initRequest)
+
+ // Regardless of success or failure, instruct client to update VAULT_ADDR
+ c.Ui.Output("\nSet the following environment variable to operate on the discovered Vault:\n")
+ c.Ui.Output(fmt.Sprintf("\t%s VAULT_ADDR=%s%s%s", export, quote, vaultURL.String(), quote))
+
+ return ret
+ default:
+ // If more than one Vault node was discovered, print out all of them,
+ // requiring the client to update VAULT_ADDR and to run init again.
+ c.Ui.Output(fmt.Sprintf("Discovered more than one uninitialized Vault node using Consul service name %+q\n", consulServiceName))
+ c.Ui.Output("To initialize these Vaults, set any *one* of the following environment variables and run 'vault init':")
+
+ // Print valid commands to make setting the variables easier
+ for _, vaultNode := range uninitializedVaults {
+ vaultURL, err := url.Parse(vaultNode)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Failed to parse Vault address: %+q. Err: %v", vaultNode, err))
+ }
+ c.Ui.Output(fmt.Sprintf("\t%s VAULT_ADDR=%s%s%s", export, quote, vaultURL.String(), quote))
+
+ }
+ return 0
+ }
+ }
+
+ return c.runInit(check, initRequest)
+}
+
+func (c *InitCommand) runInit(check bool, initRequest *api.InitRequest) int {
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 1
+ }
+
+ if check {
+ return c.checkStatus(client)
+ }
+
+ resp, err := client.Sys().Init(initRequest)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing Vault: %s", err))
+ return 1
+ }
+
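+ // Prefer the base64-encoded keys when the server returned a matching
+ // set; otherwise fall back to the raw key strings.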
+ for i, key := range resp.Keys {
+ if resp.KeysB64 != nil && len(resp.KeysB64) == len(resp.Keys) {
+ c.Ui.Output(fmt.Sprintf("Unseal Key %d: %s", i+1, resp.KeysB64[i]))
+ } else {
+ c.Ui.Output(fmt.Sprintf("Unseal Key %d: %s", i+1, key))
+ }
+ }
+ for i, key := range resp.RecoveryKeys {
+ if resp.RecoveryKeysB64 != nil && len(resp.RecoveryKeysB64) == len(resp.RecoveryKeys) {
+ c.Ui.Output(fmt.Sprintf("Recovery Key %d: %s", i+1, resp.RecoveryKeysB64[i]))
+ } else {
+ c.Ui.Output(fmt.Sprintf("Recovery Key %d: %s", i+1, key))
+ }
+ }
+
+ c.Ui.Output(fmt.Sprintf("Initial Root Token: %s", resp.RootToken))
+
+ if initRequest.StoredShares < 1 {
+ c.Ui.Output(fmt.Sprintf(
+ "\n"+
+ "Vault initialized with %d keys and a key threshold of %d. Please\n"+
+ "securely distribute the above keys. When the vault is re-sealed,\n"+
+ "restarted, or stopped, you must provide at least %d of these keys\n"+
+ "to unseal it again.\n\n"+
+ "Vault does not store the master key. Without at least %d keys,\n"+
+ "your vault will remain permanently sealed.",
+ initRequest.SecretShares,
+ initRequest.SecretThreshold,
+ initRequest.SecretThreshold,
+ initRequest.SecretThreshold,
+ ))
+ } else {
+ c.Ui.Output(
+ "\n" +
+ "Vault initialized successfully.",
+ )
+ }
+ if len(resp.RecoveryKeys) > 0 {
+ c.Ui.Output(fmt.Sprintf(
+ "\n"+
+ "Recovery key initialized with %d keys and a key threshold of %d. Please\n"+
+ "securely distribute the above keys.",
+ initRequest.RecoveryShares,
+ initRequest.RecoveryThreshold,
+ ))
+ }
+
+ return 0
+}
+
+func (c *InitCommand) checkStatus(client *api.Client) int {
+ inited, err := client.Sys().InitStatus()
+ switch {
+ case err != nil:
+ c.Ui.Error(fmt.Sprintf(
+ "Error checking initialization status: %s", err))
+ return 1
+ case inited:
+ c.Ui.Output("Vault has been initialized")
+ return 0
+ default:
+ c.Ui.Output("Vault is not initialized")
+ return 2
+ }
+}
+
+func (c *InitCommand) Synopsis() string {
+ return "Initialize a new Vault server"
+}
+
+func (c *InitCommand) Help() string {
+ helpText := `
+Usage: vault init [options]
+
+ Initialize a new Vault server.
+
+ This command connects to a Vault server and initializes it for the
+ first time. This sets up the initial set of master keys and the
+ backend data store structure.
+
+ This command can't be called on an already-initialized Vault server.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Init Options:
+
+ -check Don't actually initialize, just check if Vault is
+ already initialized. A return code of 0 means Vault
+ is initialized; a return code of 2 means Vault is not
+ initialized; a return code of 1 means an error was
+ encountered.
+
+ -key-shares=5 The number of key shares to split the master key
+ into.
+
+ -key-threshold=3 The number of key shares required to reconstruct
+ the master key.
+
+ -stored-shares=0 The number of unseal keys to store. Only used with
+ Vault HSM. Must currently be equivalent to the
+ number of shares.
+
+ -pgp-keys If provided, must be a comma-separated list of
+ files on disk containing binary- or base64-format
+ public PGP keys, or Keybase usernames specified as
+ "keybase:". The output unseal keys will
+ be encrypted and base64-encoded, in order, with the
+ given public keys. If you want to use them with the
+ 'vault unseal' command, you will need to base64-
+ decode and decrypt; this will be the plaintext
+ unseal key. When 'stored-shares' are not used, the
+ number of entries in this field must match 'key-shares'.
+ When 'stored-shares' are used, the number of entries
+ should match the difference between 'key-shares'
+ and 'stored-shares'.
+
+ -root-token-pgp-key If provided, a file on disk with a binary- or
+ base64-format public PGP key, or a Keybase username
+ specified as "keybase:". The output root
+ token will be encrypted and base64-encoded, in
+ order, with the given public key. You will need
+ to base64-decode and decrypt the result.
+
+ -recovery-shares=5 The number of key shares to split the recovery key
+ into. Only used with Vault HSM.
+
+ -recovery-threshold=3 The number of key shares required to reconstruct
+ the recovery key. Only used with Vault HSM.
+
+ -recovery-pgp-keys If provided, behaves like "pgp-keys" but for the
+ recovery key shares. Only used with Vault HSM.
+
+ -auto If set, performs service discovery using Consul.
+ When all the nodes of a Vault cluster are
+ registered with Consul, setting this flag will
+ trigger service discovery using the service name
+ with which Vault nodes are registered. This option
+ works well when each Vault cluster is registered
+ under a unique service name. Note that, when Consul
+ is serving as Vault's HA backend, Vault nodes are
+ registered with Consul by default. The service name
+ can be changed using 'consul-service' flag. Ensure
+ that the environment variables required to
+ communicate with Consul (CONSUL_HTTP_ADDR,
+ CONSUL_HTTP_TOKEN, CONSUL_HTTP_SSL, et al.) are
+ properly set. When only one Vault node is
+ discovered, it will be initialized and when more
+ than one Vault node is discovered, they will be
+ output for easy selection.
+
+ -consul-service Service name under which all the nodes of a Vault
+ cluster are registered with Consul. Note that, when
+ Vault uses Consul as its HA backend, by default,
+ Vault will register itself as a service with Consul
+ with the service name "vault". This name can be
+ modified in Vault's configuration file, using the
+ "service" option for the Consul backend.
+`
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/init_test.go b/vendor/github.com/hashicorp/vault/command/init_test.go
new file mode 100644
index 0000000..e09ba80
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/init_test.go
@@ -0,0 +1,343 @@
+package command
+
+import (
+ "bytes"
+ "encoding/base64"
+ "os"
+ "reflect"
+ "regexp"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/pgpkeys"
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/keybase/go-crypto/openpgp"
+ "github.com/keybase/go-crypto/openpgp/packet"
+ "github.com/mitchellh/cli"
+)
+
+func TestInit(t *testing.T) {
+ ui := new(cli.MockUi)
+ c := &InitCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ core := vault.TestCore(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ init, err := core.Initialized()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if init {
+ t.Fatal("should not be initialized")
+ }
+
+ args := []string{"-address", addr}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ init, err = core.Initialized()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if !init {
+ t.Fatal("should be initialized")
+ }
+
+ sealConf, err := core.SealAccess().BarrierConfig()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ expected := &vault.SealConfig{
+ Type: "shamir",
+ SecretShares: 5,
+ SecretThreshold: 3,
+ }
+ if !reflect.DeepEqual(expected, sealConf) {
+ t.Fatalf("expected:\n%#v\ngot:\n%#v\n", expected, sealConf)
+ }
+}
+
+func TestInit_Check(t *testing.T) {
+ ui := new(cli.MockUi)
+ c := &InitCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ core := vault.TestCore(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ // Should return 2, not initialized
+ args := []string{"-address", addr, "-check"}
+ if code := c.Run(args); code != 2 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Now initialize it
+ args = []string{"-address", addr}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Should return 0, initialized
+ args = []string{"-address", addr, "-check"}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ init, err := core.Initialized()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if !init {
+ t.Fatal("should be initialized")
+ }
+}
+
+func TestInit_custom(t *testing.T) {
+ ui := new(cli.MockUi)
+ c := &InitCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ core := vault.TestCore(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ init, err := core.Initialized()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if init {
+ t.Fatal("should not be initialized")
+ }
+
+ args := []string{
+ "-address", addr,
+ "-key-shares", "7",
+ "-key-threshold", "3",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ init, err = core.Initialized()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if !init {
+ t.Fatal("should be initialized")
+ }
+
+ sealConf, err := core.SealAccess().BarrierConfig()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ expected := &vault.SealConfig{
+ Type: "shamir",
+ SecretShares: 7,
+ SecretThreshold: 3,
+ }
+ if !reflect.DeepEqual(expected, sealConf) {
+ t.Fatalf("expected:\n%#v\ngot:\n%#v\n", expected, sealConf)
+ }
+
+ re, err := regexp.Compile("\\s+Initial Root Token:\\s+(.*)")
+ if err != nil {
+ t.Fatalf("Error compiling regex: %s", err)
+ }
+ matches := re.FindAllStringSubmatch(ui.OutputWriter.String(), -1)
+ if len(matches) != 1 {
+ t.Fatalf("Unexpected number of tokens found, got %d", len(matches))
+ }
+
+ rootToken := matches[0][1]
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("Error fetching client: %v", err)
+ }
+
+ client.SetToken(rootToken)
+
+ re, err = regexp.Compile("\\s*Unseal Key \\d+: (.*)")
+ if err != nil {
+ t.Fatalf("Error compiling regex: %s", err)
+ }
+ matches = re.FindAllStringSubmatch(ui.OutputWriter.String(), -1)
+ if len(matches) != 7 {
+ t.Fatalf("Unexpected number of keys returned, got %d, matches was \n\n%#v\n\n, input was \n\n%s\n\n", len(matches), matches, ui.OutputWriter.String())
+ }
+
+ var unsealed bool
+ for i := 0; i < 3; i++ {
+ decodedKey, err := base64.StdEncoding.DecodeString(strings.TrimSpace(matches[i][1]))
+ if err != nil {
+ t.Fatalf("err decoding key %v: %v", matches[i][1], err)
+ }
+ unsealed, err = core.Unseal(decodedKey)
+ if err != nil {
+ t.Fatalf("err during unseal: %v; key was %v", err, matches[i][1])
+ }
+ }
+ if !unsealed {
+ t.Fatal("expected to be unsealed")
+ }
+
+ tokenInfo, err := client.Auth().Token().LookupSelf()
+ if err != nil {
+ t.Fatalf("Error looking up root token info: %v", err)
+ }
+
+ if tokenInfo.Data["policies"].([]interface{})[0].(string) != "root" {
+ t.Fatalf("expected root policy")
+ }
+}
+
+func TestInit_PGP(t *testing.T) {
+ ui := new(cli.MockUi)
+ c := &InitCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ core := vault.TestCore(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ init, err := core.Initialized()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if init {
+ t.Fatal("should not be initialized")
+ }
+
+ tempDir, pubFiles, err := getPubKeyFiles(t)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ args := []string{
+ "-address", addr,
+ "-key-shares", "2",
+ "-pgp-keys", pubFiles[0] + ",@" + pubFiles[1] + "," + pubFiles[2],
+ "-key-threshold", "2",
+ "-root-token-pgp-key", pubFiles[0],
+ }
+
+ // This should fail, as key-shares does not match pgp-keys size
+ if code := c.Run(args); code == 0 {
+ t.Fatalf("bad (command should have failed): %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ args = []string{
+ "-address", addr,
+ "-key-shares", "4",
+ "-pgp-keys", pubFiles[0] + ",@" + pubFiles[1] + "," + pubFiles[2] + "," + pubFiles[3],
+ "-key-threshold", "2",
+ "-root-token-pgp-key", pubFiles[0],
+ }
+
+ ui.OutputWriter.Reset()
+
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ init, err = core.Initialized()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if !init {
+ t.Fatal("should be initialized")
+ }
+
+ sealConf, err := core.SealAccess().BarrierConfig()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ pgpKeys := []string{}
+ for _, pubFile := range pubFiles {
+ pub, err := pgpkeys.ReadPGPFile(pubFile)
+ if err != nil {
+ t.Fatalf("bad: %v", err)
+ }
+ pgpKeys = append(pgpKeys, pub)
+ }
+
+ expected := &vault.SealConfig{
+ Type: "shamir",
+ SecretShares: 4,
+ SecretThreshold: 2,
+ PGPKeys: pgpKeys,
+ }
+ if !reflect.DeepEqual(expected, sealConf) {
+ t.Fatalf("expected:\n%#v\ngot:\n%#v\n", expected, sealConf)
+ }
+
+ re, err := regexp.Compile("\\s+Initial Root Token:\\s+(.*)")
+ if err != nil {
+ t.Fatalf("Error compiling regex: %s", err)
+ }
+ matches := re.FindAllStringSubmatch(ui.OutputWriter.String(), -1)
+ if len(matches) != 1 {
+ t.Fatalf("Unexpected number of tokens found, got %d", len(matches))
+ }
+
+ encRootToken := matches[0][1]
+ privKeyBytes, err := base64.StdEncoding.DecodeString(pgpkeys.TestPrivKey1)
+ if err != nil {
+ t.Fatalf("error decoding private key: %v", err)
+ }
+ ptBuf := bytes.NewBuffer(nil)
+ entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privKeyBytes)))
+ if err != nil {
+ t.Fatalf("Error parsing private key: %s", err)
+ }
+ var rootBytes []byte
+ rootBytes, err = base64.StdEncoding.DecodeString(encRootToken)
+ if err != nil {
+ t.Fatalf("Error decoding root token: %s", err)
+ }
+ entityList := &openpgp.EntityList{entity}
+ md, err := openpgp.ReadMessage(bytes.NewBuffer(rootBytes), entityList, nil, nil)
+ if err != nil {
+ t.Fatalf("Error decrypting root token: %s", err)
+ }
+ ptBuf.ReadFrom(md.UnverifiedBody)
+ rootToken := ptBuf.String()
+
+ parseDecryptAndTestUnsealKeys(t, ui.OutputWriter.String(), rootToken, false, nil, nil, core)
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("Error fetching client: %v", err)
+ }
+
+ client.SetToken(rootToken)
+
+ tokenInfo, err := client.Auth().Token().LookupSelf()
+ if err != nil {
+ t.Fatalf("Error looking up root token info: %v", err)
+ }
+
+ if tokenInfo.Data["policies"].([]interface{})[0].(string) != "root" {
+ t.Fatalf("expected root policy")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/key_status.go b/vendor/github.com/hashicorp/vault/command/key_status.go
new file mode 100644
index 0000000..ff1b086
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/key_status.go
@@ -0,0 +1,55 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// KeyStatusCommand is a Command that provides information about the key status
+type KeyStatusCommand struct {
+ meta.Meta
+}
+
+func (c *KeyStatusCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("key-status", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ status, err := client.Sys().KeyStatus()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error reading audits: %s", err))
+ return 2
+ }
+
+ c.Ui.Output(fmt.Sprintf("Key Term: %d", status.Term))
+ c.Ui.Output(fmt.Sprintf("Installation Time: %v", status.InstallTime))
+ return 0
+}
+
+func (c *KeyStatusCommand) Synopsis() string {
+ return "Provides information about the active encryption key"
+}
+
+func (c *KeyStatusCommand) Help() string {
+ helpText := `
+Usage: vault key-status [options]
+
+ Provides information about the active encryption key. Specifically,
+ the current key term and the key installation time.
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
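
For reference, the command above is a thin wrapper over a single API call. A minimal sketch of driving it directly — assuming `api.NewClient(api.DefaultConfig())` picks up `VAULT_ADDR` and `VAULT_TOKEN` from the environment and the server is unsealed:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// The same call KeyStatusCommand makes: one round trip to sys/key-status.
	status, err := client.Sys().KeyStatus()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Key Term: %d\n", status.Term)
	fmt.Printf("Installation Time: %v\n", status.InstallTime)
}
```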
diff --git a/vendor/github.com/hashicorp/vault/command/key_status_test.go b/vendor/github.com/hashicorp/vault/command/key_status_test.go
new file mode 100644
index 0000000..0adcefa
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/key_status_test.go
@@ -0,0 +1,31 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestKeyStatus(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &KeyStatusCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/list.go b/vendor/github.com/hashicorp/vault/command/list.go
new file mode 100644
index 0000000..71bf388
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/list.go
@@ -0,0 +1,97 @@
+package command
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/meta"
+)
+
+// ListCommand is a Command that lists data from the Vault.
+type ListCommand struct {
+ meta.Meta
+}
+
+func (c *ListCommand) Run(args []string) int {
+ var format string
+ var err error
+ var secret *api.Secret
+ var flags *flag.FlagSet
+ flags = c.Meta.FlagSet("list", meta.FlagSetDefault)
+ flags.StringVar(&format, "format", "table", "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 1 || len(args[0]) == 0 {
+ c.Ui.Error("list expects one argument")
+ flags.Usage()
+ return 1
+ }
+
+ path := args[0]
+ if path[0] == '/' {
+ path = path[1:]
+ }
+
+ if !strings.HasSuffix(path, "/") {
+ path = path + "/"
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ secret, err = client.Logical().List(path)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error reading %s: %s", path, err))
+ return 1
+ }
+ if secret == nil {
+ c.Ui.Error(fmt.Sprintf(
+ "No value found at %s", path))
+ return 1
+ }
+ if secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 {
+ return OutputSecret(c.Ui, format, secret)
+ }
+
+ if secret.Data["keys"] == nil {
+ c.Ui.Error("No entries found")
+ return 0
+ }
+
+ return OutputList(c.Ui, format, secret)
+}
+
+func (c *ListCommand) Synopsis() string {
+ return "List data or secrets in Vault"
+}
+
+func (c *ListCommand) Help() string {
+ helpText :=
+ `
+Usage: vault list [options] path
+
+ List data from Vault.
+
+ Retrieve a listing of available data. The data returned, if any, is backend-
+ and endpoint-specific.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+List Options:
+
+ -format=table The format for output. By default it is a whitespace-
+ delimited table. This can also be json or yaml.
+`
+ return strings.TrimSpace(helpText)
+}
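
The listing behavior is easier to see against the raw API: the secret returned by a list carries the child entry names under `Data["keys"]`, with nested paths marked by a trailing slash. A minimal sketch, assuming the standard environment variables and a populated generic backend at `secret/`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent to `vault list secret/`.
	secret, err := client.Logical().List("secret/")
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil || secret.Data["keys"] == nil {
		log.Fatal("no entries found at secret/")
	}
	// Entries ending in "/" have children of their own.
	for _, k := range secret.Data["keys"].([]interface{}) {
		fmt.Println(k.(string))
	}
}
```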
diff --git a/vendor/github.com/hashicorp/vault/command/list_test.go b/vendor/github.com/hashicorp/vault/command/list_test.go
new file mode 100644
index 0000000..1f75c0b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/list_test.go
@@ -0,0 +1,71 @@
+package command
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestList(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+	c := &ListCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "-format", "json",
+ "secret",
+ }
+
+ // Run once so the client is setup, ignore errors
+ c.Run(args)
+
+ // Get the client so we can write data
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ data := map[string]interface{}{"value": "bar"}
+ if _, err := client.Logical().Write("secret/foo", data); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ data = map[string]interface{}{"value": "bar"}
+ if _, err := client.Logical().Write("secret/foo/bar", data); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ secret, err := client.Logical().List("secret/")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if secret == nil {
+ t.Fatalf("err: No value found at secret/")
+ }
+
+ if secret.Data == nil {
+ t.Fatalf("err: Data not found")
+ }
+
+ exp := map[string]interface{}{
+ "keys": []interface{}{"foo", "foo/"},
+ }
+
+ if !reflect.DeepEqual(secret.Data, exp) {
+ t.Fatalf("err: expected %#v, got %#v", exp, secret.Data)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/mount.go b/vendor/github.com/hashicorp/vault/command/mount.go
new file mode 100644
index 0000000..eb2b53a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/mount.go
@@ -0,0 +1,121 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/meta"
+)
+
+// MountCommand is a Command that mounts a new secret backend.
+type MountCommand struct {
+ meta.Meta
+}
+
+func (c *MountCommand) Run(args []string) int {
+ var description, path, defaultLeaseTTL, maxLeaseTTL string
+ var local, forceNoCache bool
+ flags := c.Meta.FlagSet("mount", meta.FlagSetDefault)
+ flags.StringVar(&description, "description", "", "")
+ flags.StringVar(&path, "path", "", "")
+ flags.StringVar(&defaultLeaseTTL, "default-lease-ttl", "", "")
+ flags.StringVar(&maxLeaseTTL, "max-lease-ttl", "", "")
+ flags.BoolVar(&forceNoCache, "force-no-cache", false, "")
+ flags.BoolVar(&local, "local", false, "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 1 {
+ flags.Usage()
+ c.Ui.Error(fmt.Sprintf(
+ "\nmount expects one argument: the type to mount."))
+ return 1
+ }
+
+ mountType := args[0]
+
+ // If no path is specified, we default the path to the backend type
+ if path == "" {
+ path = mountType
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ mountInfo := &api.MountInput{
+ Type: mountType,
+ Description: description,
+ Config: api.MountConfigInput{
+ DefaultLeaseTTL: defaultLeaseTTL,
+ MaxLeaseTTL: maxLeaseTTL,
+ ForceNoCache: forceNoCache,
+ },
+ Local: local,
+ }
+
+ if err := client.Sys().Mount(path, mountInfo); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Mount error: %s", err))
+ return 2
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "Successfully mounted '%s' at '%s'!",
+ mountType, path))
+
+ return 0
+}
+
+func (c *MountCommand) Synopsis() string {
+ return "Mount a logical backend"
+}
+
+func (c *MountCommand) Help() string {
+ helpText := `
+Usage: vault mount [options] type
+
+ Mount a logical backend.
+
+ This command mounts a logical backend for storing and/or generating
+ secrets.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Mount Options:
+
+ -description= Human-friendly description of the purpose for
+ the mount. This shows up in the mounts command.
+
+ -path= Mount point for the logical backend. This
+ defaults to the type of the mount.
+
+ -default-lease-ttl= Default lease time-to-live for this backend.
+ If not specified, uses the global default, or
+ the previously set value. Set to '0' to
+ explicitly set it to use the global default.
+
+ -max-lease-ttl= Max lease time-to-live for this backend.
+ If not specified, uses the global default, or
+ the previously set value. Set to '0' to
+ explicitly set it to use the global default.
+
+ -force-no-cache Forces the backend to disable caching. If not
+ specified, uses the global default. This does
+ not affect caching of the underlying encrypted
+ data storage.
+
+ -local Mark the mount as a local mount. Local mounts
+                       are not replicated, nor (if a secondary)
+ removed by replication.
+
+`
+ return strings.TrimSpace(helpText)
+}
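
The flags above map one-to-one onto `api.MountInput`, as the Run method shows. A minimal sketch of the same mount performed through the API, with the path "kv" chosen purely for illustration:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent to:
	//   vault mount -path=kv -default-lease-ttl=24h generic
	input := &api.MountInput{
		Type:        "generic",
		Description: "example key/value store",
		Config: api.MountConfigInput{
			DefaultLeaseTTL: "24h",
		},
	}
	if err := client.Sys().Mount("kv", input); err != nil {
		log.Fatal(err)
	}
}
```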
diff --git a/vendor/github.com/hashicorp/vault/command/mount_test.go b/vendor/github.com/hashicorp/vault/command/mount_test.go
new file mode 100644
index 0000000..314ac13
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/mount_test.go
@@ -0,0 +1,50 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestMount(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &MountCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "generic",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ mounts, err := client.Sys().ListMounts()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ mount, ok := mounts["generic/"]
+ if !ok {
+ t.Fatal("should have generic mount")
+ }
+ if mount.Type != "generic" {
+ t.Fatal("should be generic type")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/mount_tune.go b/vendor/github.com/hashicorp/vault/command/mount_tune.go
new file mode 100644
index 0000000..e1efdd2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/mount_tune.go
@@ -0,0 +1,89 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/meta"
+)
+
+// MountTuneCommand is a Command that tunes the configuration of a
+// mounted secret backend.
+type MountTuneCommand struct {
+ meta.Meta
+}
+
+func (c *MountTuneCommand) Run(args []string) int {
+ var defaultLeaseTTL, maxLeaseTTL string
+ flags := c.Meta.FlagSet("mount-tune", meta.FlagSetDefault)
+ flags.StringVar(&defaultLeaseTTL, "default-lease-ttl", "", "")
+ flags.StringVar(&maxLeaseTTL, "max-lease-ttl", "", "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 1 {
+ flags.Usage()
+ c.Ui.Error(fmt.Sprintf(
+ "\nmount-tune expects one arguments: the mount path"))
+ return 1
+ }
+
+ path := args[0]
+
+ mountConfig := api.MountConfigInput{
+ DefaultLeaseTTL: defaultLeaseTTL,
+ MaxLeaseTTL: maxLeaseTTL,
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ if err := client.Sys().TuneMount(path, mountConfig); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Mount tune error: %s", err))
+ return 2
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "Successfully tuned mount '%s'!", path))
+
+ return 0
+}
+
+func (c *MountTuneCommand) Synopsis() string {
+ return "Tune mount configuration parameters"
+}
+
+func (c *MountTuneCommand) Help() string {
+ helpText := `
+Usage: vault mount-tune [options] path
+
+ Tune configuration options for a mounted secret backend.
+
+ Example: vault mount-tune -default-lease-ttl="24h" secret
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Mount Options:
+
+ -default-lease-ttl= Default lease time-to-live for this backend.
+ If not specified, uses the system default, or
+ the previously set value. Set to 'system' to
+ explicitly set it to use the system default.
+
+ -max-lease-ttl= Max lease time-to-live for this backend.
+ If not specified, uses the system default, or
+ the previously set value. Set to 'system' to
+ explicitly set it to use the system default.
+
+`
+ return strings.TrimSpace(helpText)
+}
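
Tuning goes through the same `api.MountConfigInput` used at mount time; fields left empty are not changed. A sketch of the example from the help text, under the usual environment-variable assumptions:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent to: vault mount-tune -default-lease-ttl=24h secret
	// MaxLeaseTTL is left empty, so it stays as previously configured.
	cfg := api.MountConfigInput{
		DefaultLeaseTTL: "24h",
	}
	if err := client.Sys().TuneMount("secret", cfg); err != nil {
		log.Fatal(err)
	}
}
```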
diff --git a/vendor/github.com/hashicorp/vault/command/mounts.go b/vendor/github.com/hashicorp/vault/command/mounts.go
new file mode 100644
index 0000000..d918d67
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/mounts.go
@@ -0,0 +1,96 @@
+package command
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+ "github.com/ryanuber/columnize"
+)
+
+// MountsCommand is a Command that lists the mounts.
+type MountsCommand struct {
+ meta.Meta
+}
+
+func (c *MountsCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("mounts", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ mounts, err := client.Sys().ListMounts()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error reading mounts: %s", err))
+ return 2
+ }
+
+ paths := make([]string, 0, len(mounts))
+ for path := range mounts {
+ paths = append(paths, path)
+ }
+ sort.Strings(paths)
+
+ columns := []string{"Path | Type | Default TTL | Max TTL | Force No Cache | Replication Behavior | Description"}
+ for _, path := range paths {
+ mount := mounts[path]
+ defTTL := "system"
+ switch {
+ case mount.Type == "system":
+ defTTL = "n/a"
+ case mount.Type == "cubbyhole":
+ defTTL = "n/a"
+ case mount.Config.DefaultLeaseTTL != 0:
+ defTTL = strconv.Itoa(mount.Config.DefaultLeaseTTL)
+ }
+ maxTTL := "system"
+ switch {
+ case mount.Type == "system":
+ maxTTL = "n/a"
+ case mount.Type == "cubbyhole":
+ maxTTL = "n/a"
+ case mount.Config.MaxLeaseTTL != 0:
+ maxTTL = strconv.Itoa(mount.Config.MaxLeaseTTL)
+ }
+ replicatedBehavior := "replicated"
+ if mount.Local {
+ replicatedBehavior = "local"
+ }
+ columns = append(columns, fmt.Sprintf(
+ "%s | %s | %s | %s | %v | %s | %s", path, mount.Type, defTTL, maxTTL,
+ mount.Config.ForceNoCache, replicatedBehavior, mount.Description))
+ }
+
+ c.Ui.Output(columnize.SimpleFormat(columns))
+ return 0
+}
+
+func (c *MountsCommand) Synopsis() string {
+ return "Lists mounted backends in Vault"
+}
+
+func (c *MountsCommand) Help() string {
+ helpText := `
+Usage: vault mounts [options]
+
+ Outputs information about the mounted backends.
+
+ This command lists the mounted backends, their mount points, the
+ configured TTLs, and a human-friendly description of the mount point.
+ A TTL of 'system' indicates that the system default is being used.
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
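
The table above is rendered from `client.Sys().ListMounts()`, whose map is keyed by mount path with a trailing slash. A minimal sketch of consuming it directly:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	mounts, err := client.Sys().ListMounts()
	if err != nil {
		log.Fatal(err)
	}
	// Keys look like "secret/" or "sys/"; a zero TTL means the
	// system default applies, matching the "system" label above.
	for path, mount := range mounts {
		fmt.Printf("%s\t%s\t%d\t%s\n",
			path, mount.Type, mount.Config.DefaultLeaseTTL, mount.Description)
	}
}
```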
diff --git a/vendor/github.com/hashicorp/vault/command/mounts_test.go b/vendor/github.com/hashicorp/vault/command/mounts_test.go
new file mode 100644
index 0000000..55e5f67
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/mounts_test.go
@@ -0,0 +1,31 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestMounts(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &MountsCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/path_help.go b/vendor/github.com/hashicorp/vault/command/path_help.go
new file mode 100644
index 0000000..6eed960
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/path_help.go
@@ -0,0 +1,76 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// PathHelpCommand is a Command that looks up the help for a path.
+type PathHelpCommand struct {
+ meta.Meta
+}
+
+func (c *PathHelpCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("help", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 1 {
+ flags.Usage()
+ c.Ui.Error("\nhelp expects a single argument")
+ return 1
+ }
+
+ path := args[0]
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ help, err := client.Help(path)
+ if err != nil {
+ if strings.Contains(err.Error(), "Vault is sealed") {
+ c.Ui.Error(`Error: Vault is sealed.
+
+The path-help command requires the vault to be unsealed so that
+mount points of secret backends are known.`)
+ } else {
+ c.Ui.Error(fmt.Sprintf(
+ "Error reading help: %s", err))
+ }
+ return 1
+ }
+
+ c.Ui.Output(help.Help)
+ return 0
+}
+
+func (c *PathHelpCommand) Synopsis() string {
+ return "Look up the help for a path"
+}
+
+func (c *PathHelpCommand) Help() string {
+ helpText := `
+Usage: vault path-help [options] path
+
+ Look up the help for a path.
+
+ All endpoints in Vault from system paths, secret paths, and credential
+ providers provide built-in help. This command looks up and outputs that
+ help.
+
+ The command requires that the vault be unsealed, because otherwise
+ the mount points of the backends are unknown.
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
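
Built-in help is served by the backends themselves, which is why the command needs an unsealed vault. A minimal sketch of the underlying call:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent to: vault path-help sys/mounts
	help, err := client.Help("sys/mounts")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(help.Help)
}
```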
diff --git a/vendor/github.com/hashicorp/vault/command/path_help_test.go b/vendor/github.com/hashicorp/vault/command/path_help_test.go
new file mode 100644
index 0000000..46219ba
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/path_help_test.go
@@ -0,0 +1,32 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestHelp(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &PathHelpCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "sys/mounts",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/pgp_test.go b/vendor/github.com/hashicorp/vault/command/pgp_test.go
new file mode 100644
index 0000000..c368e31
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/pgp_test.go
@@ -0,0 +1,181 @@
+package command
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/hex"
+ "io/ioutil"
+ "reflect"
+ "regexp"
+ "sort"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/pgpkeys"
+ "github.com/hashicorp/vault/vault"
+
+ "github.com/keybase/go-crypto/openpgp"
+ "github.com/keybase/go-crypto/openpgp/packet"
+)
+
+func getPubKeyFiles(t *testing.T) (string, []string, error) {
+ tempDir, err := ioutil.TempDir("", "vault-test")
+ if err != nil {
+ t.Fatalf("Error creating temporary directory: %s", err)
+ }
+
+ pubFiles := []string{
+ tempDir + "/pubkey1",
+ tempDir + "/pubkey2",
+ tempDir + "/pubkey3",
+ tempDir + "/aapubkey1",
+ }
+ decoder := base64.StdEncoding
+ pub1Bytes, err := decoder.DecodeString(pgpkeys.TestPubKey1)
+ if err != nil {
+ t.Fatalf("Error decoding bytes for public key 1: %s", err)
+ }
+ err = ioutil.WriteFile(pubFiles[0], pub1Bytes, 0755)
+ if err != nil {
+ t.Fatalf("Error writing pub key 1 to temp file: %s", err)
+ }
+ pub2Bytes, err := decoder.DecodeString(pgpkeys.TestPubKey2)
+ if err != nil {
+ t.Fatalf("Error decoding bytes for public key 2: %s", err)
+ }
+ err = ioutil.WriteFile(pubFiles[1], pub2Bytes, 0755)
+ if err != nil {
+ t.Fatalf("Error writing pub key 2 to temp file: %s", err)
+ }
+ pub3Bytes, err := decoder.DecodeString(pgpkeys.TestPubKey3)
+ if err != nil {
+ t.Fatalf("Error decoding bytes for public key 3: %s", err)
+ }
+ err = ioutil.WriteFile(pubFiles[2], pub3Bytes, 0755)
+ if err != nil {
+ t.Fatalf("Error writing pub key 3 to temp file: %s", err)
+ }
+ err = ioutil.WriteFile(pubFiles[3], []byte(pgpkeys.TestAAPubKey1), 0755)
+ if err != nil {
+ t.Fatalf("Error writing aa pub key 1 to temp file: %s", err)
+ }
+
+ return tempDir, pubFiles, nil
+}
+
+func parseDecryptAndTestUnsealKeys(t *testing.T,
+ input, rootToken string,
+ fingerprints bool,
+ backupKeys map[string][]string,
+ backupKeysB64 map[string][]string,
+ core *vault.Core) {
+
+ decoder := base64.StdEncoding
+ priv1Bytes, err := decoder.DecodeString(pgpkeys.TestPrivKey1)
+ if err != nil {
+ t.Fatalf("Error decoding bytes for private key 1: %s", err)
+ }
+ priv2Bytes, err := decoder.DecodeString(pgpkeys.TestPrivKey2)
+ if err != nil {
+ t.Fatalf("Error decoding bytes for private key 2: %s", err)
+ }
+ priv3Bytes, err := decoder.DecodeString(pgpkeys.TestPrivKey3)
+ if err != nil {
+ t.Fatalf("Error decoding bytes for private key 3: %s", err)
+ }
+
+ privBytes := [][]byte{
+ priv1Bytes,
+ priv2Bytes,
+ priv3Bytes,
+ }
+
+ testFunc := func(bkeys map[string][]string) {
+ var re *regexp.Regexp
+ if fingerprints {
+ re, err = regexp.Compile("\\s*Key\\s+\\d+\\s+fingerprint:\\s+([0-9a-fA-F]+);\\s+value:\\s+(.*)")
+ } else {
+ re, err = regexp.Compile("\\s*Key\\s+\\d+:\\s+(.*)")
+ }
+ if err != nil {
+ t.Fatalf("Error compiling regex: %s", err)
+ }
+ matches := re.FindAllStringSubmatch(input, -1)
+ if len(matches) != 4 {
+ t.Fatalf("Unexpected number of keys returned, got %d, matches was \n\n%#v\n\n, input was \n\n%s\n\n", len(matches), matches, input)
+ }
+
+ encodedKeys := []string{}
+ matchedFingerprints := []string{}
+ for _, tuple := range matches {
+ if fingerprints {
+ if len(tuple) != 3 {
+ t.Fatalf("Key not found: %#v", tuple)
+ }
+ matchedFingerprints = append(matchedFingerprints, tuple[1])
+ encodedKeys = append(encodedKeys, tuple[2])
+ } else {
+ if len(tuple) != 2 {
+ t.Fatalf("Key not found: %#v", tuple)
+ }
+ encodedKeys = append(encodedKeys, tuple[1])
+ }
+ }
+
+ if bkeys != nil && len(matchedFingerprints) != 0 {
+ testMap := map[string][]string{}
+ for i, v := range matchedFingerprints {
+ testMap[v] = append(testMap[v], encodedKeys[i])
+ sort.Strings(testMap[v])
+ }
+ if !reflect.DeepEqual(testMap, bkeys) {
+ t.Fatalf("test map and backup map do not match, test map is\n%#v\nbackup map is\n%#v", testMap, bkeys)
+ }
+ }
+
+ unsealKeys := []string{}
+ ptBuf := bytes.NewBuffer(nil)
+ for i, privKeyBytes := range privBytes {
+ if i > 2 {
+ break
+ }
+ ptBuf.Reset()
+ entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privKeyBytes)))
+ if err != nil {
+ t.Fatalf("Error parsing private key %d: %s", i, err)
+ }
+ var keyBytes []byte
+ keyBytes, err = base64.StdEncoding.DecodeString(encodedKeys[i])
+ if err != nil {
+ t.Fatalf("Error decoding key %d: %s", i, err)
+ }
+ entityList := &openpgp.EntityList{entity}
+ md, err := openpgp.ReadMessage(bytes.NewBuffer(keyBytes), entityList, nil, nil)
+ if err != nil {
+ t.Fatalf("Error decrypting with key %d (%s): %s", i, encodedKeys[i], err)
+ }
+ ptBuf.ReadFrom(md.UnverifiedBody)
+ unsealKeys = append(unsealKeys, ptBuf.String())
+ }
+
+ err = core.Seal(rootToken)
+ if err != nil {
+ t.Fatalf("Error sealing vault with provided root token: %s", err)
+ }
+
+ for i, unsealKey := range unsealKeys {
+ unsealBytes, err := hex.DecodeString(unsealKey)
+ if err != nil {
+ t.Fatalf("Error hex decoding unseal key %s: %s", unsealKey, err)
+ }
+ unsealed, err := core.Unseal(unsealBytes)
+ if err != nil {
+ t.Fatalf("Error using unseal key %s: %s", unsealKey, err)
+ }
+ if i >= 2 && !unsealed {
+ t.Fatalf("Error: Provided two unseal keys but core is not unsealed")
+ }
+ }
+ }
+
+ testFunc(backupKeysB64)
+}
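
The decryption dance inside parseDecryptAndTestUnsealKeys is the same one an operator performs after an init or rekey with -pgp-keys. A self-contained sketch of just that step, using the same keybase go-crypto calls as the test (the package name pgputil is illustrative):

```go
package pgputil

import (
	"bytes"
	"encoding/base64"

	"github.com/keybase/go-crypto/openpgp"
	"github.com/keybase/go-crypto/openpgp/packet"
)

// DecryptUnsealKey reverses what init/rekey do with -pgp-keys: each
// returned share is base64(OpenPGP-encrypt(plaintext unseal key)).
// privKeyB64 is a base64-encoded binary private key, in the same form
// as pgpkeys.TestPrivKey1 above.
func DecryptUnsealKey(privKeyB64, encryptedKeyB64 string) (string, error) {
	privBytes, err := base64.StdEncoding.DecodeString(privKeyB64)
	if err != nil {
		return "", err
	}
	entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privBytes)))
	if err != nil {
		return "", err
	}
	ciphertext, err := base64.StdEncoding.DecodeString(encryptedKeyB64)
	if err != nil {
		return "", err
	}
	md, err := openpgp.ReadMessage(bytes.NewBuffer(ciphertext), &openpgp.EntityList{entity}, nil, nil)
	if err != nil {
		return "", err
	}
	var plaintext bytes.Buffer
	if _, err := plaintext.ReadFrom(md.UnverifiedBody); err != nil {
		return "", err
	}
	return plaintext.String(), nil
}
```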
diff --git a/vendor/github.com/hashicorp/vault/command/policy_delete.go b/vendor/github.com/hashicorp/vault/command/policy_delete.go
new file mode 100644
index 0000000..ff8342a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/policy_delete.go
@@ -0,0 +1,65 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// PolicyDeleteCommand is a Command that deletes a policy.
+type PolicyDeleteCommand struct {
+ meta.Meta
+}
+
+func (c *PolicyDeleteCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("policy-delete", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 1 {
+ flags.Usage()
+ c.Ui.Error(fmt.Sprintf(
+ "\npolicy-delete expects exactly one argument"))
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ name := args[0]
+ if err := client.Sys().DeletePolicy(name); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error: %s", err))
+ return 1
+ }
+
+ c.Ui.Output(fmt.Sprintf("Policy '%s' deleted.", name))
+ return 0
+}
+
+func (c *PolicyDeleteCommand) Synopsis() string {
+ return "Delete a policy from the server"
+}
+
+func (c *PolicyDeleteCommand) Help() string {
+ helpText := `
+Usage: vault policy-delete [options] name
+
+ Delete a policy with the given name.
+
+ Once the policy is deleted, all users associated with the policy will
+ be affected immediately. When a user is associated with a policy that
+ doesn't exist, it is identical to not being associated with that policy.
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
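
A minimal sketch of the same deletion through the API, with the policy name "foo" purely illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent to: vault policy-delete foo
	// Users attached to the policy lose its grants immediately.
	if err := client.Sys().DeletePolicy("foo"); err != nil {
		log.Fatal(err)
	}
}
```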
diff --git a/vendor/github.com/hashicorp/vault/command/policy_delete_test.go b/vendor/github.com/hashicorp/vault/command/policy_delete_test.go
new file mode 100644
index 0000000..4f62a10
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/policy_delete_test.go
@@ -0,0 +1,61 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestPolicyDelete(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &PolicyDeleteCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "foo",
+ }
+
+ // Run once so the client is setup, ignore errors
+ c.Run(args)
+
+ // Get the client so we can write data
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if err := client.Sys().PutPolicy("foo", testPolicyDeleteRules); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Test that the delete works
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Test the policy is gone
+ rules, err := client.Sys().GetPolicy("foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if rules != "" {
+ t.Fatalf("bad: %#v", rules)
+ }
+}
+
+const testPolicyDeleteRules = `
+path "sys" {
+ policy = "deny"
+}
+`
diff --git a/vendor/github.com/hashicorp/vault/command/policy_list.go b/vendor/github.com/hashicorp/vault/command/policy_list.go
new file mode 100644
index 0000000..73cb9c5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/policy_list.go
@@ -0,0 +1,92 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// PolicyListCommand is a Command that lists policies or reads a single policy.
+type PolicyListCommand struct {
+ meta.Meta
+}
+
+func (c *PolicyListCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("policy-list", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) == 1 {
+ return c.read(args[0])
+ } else if len(args) == 0 {
+ return c.list()
+ } else {
+ flags.Usage()
+ c.Ui.Error(fmt.Sprintf(
+ "\npolicies expects zero or one arguments"))
+ return 1
+ }
+}
+
+func (c *PolicyListCommand) list() int {
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ policies, err := client.Sys().ListPolicies()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error: %s", err))
+ return 1
+ }
+
+ for _, p := range policies {
+ c.Ui.Output(p)
+ }
+
+ return 0
+}
+
+func (c *PolicyListCommand) read(n string) int {
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ rules, err := client.Sys().GetPolicy(n)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error: %s", err))
+ return 1
+ }
+
+ c.Ui.Output(rules)
+ return 0
+}
+
+func (c *PolicyListCommand) Synopsis() string {
+ return "List the policies on the server"
+}
+
+func (c *PolicyListCommand) Help() string {
+ helpText := `
+Usage: vault policies [options] [name]
+
+ List the policies that are available or read a single policy.
+
+ This command lists the policies that are written to the Vault server.
+  If the name of a policy is specified, that policy is output.
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
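
Both modes above reduce to one API call each. A minimal sketch, assuming the standard environment variables; "default" is just an example policy name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// `vault policies` with no argument: list policy names.
	policies, err := client.Sys().ListPolicies()
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range policies {
		fmt.Println(name)
	}

	// `vault policies <name>`: print that policy's rules.
	rules, err := client.Sys().GetPolicy("default")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rules)
}
```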
diff --git a/vendor/github.com/hashicorp/vault/command/policy_list_test.go b/vendor/github.com/hashicorp/vault/command/policy_list_test.go
new file mode 100644
index 0000000..b2afe29
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/policy_list_test.go
@@ -0,0 +1,53 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestPolicyList(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &PolicyListCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestPolicyRead(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &PolicyListCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "root",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/policy_write.go b/vendor/github.com/hashicorp/vault/command/policy_write.go
new file mode 100644
index 0000000..4f73ffe
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/policy_write.go
@@ -0,0 +1,89 @@
+package command
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// PolicyWriteCommand is a Command that writes a policy.
+type PolicyWriteCommand struct {
+ meta.Meta
+}
+
+func (c *PolicyWriteCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("policy-write", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 2 {
+ flags.Usage()
+ c.Ui.Error(fmt.Sprintf(
+ "\npolicy-write expects exactly two arguments"))
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ name := args[0]
+ path := args[1]
+
+ // Read the policy
+ var f io.Reader = os.Stdin
+ if path != "-" {
+ file, err := os.Open(path)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error opening file: %s", err))
+ return 1
+ }
+ defer file.Close()
+ f = file
+ }
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, f); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error reading file: %s", err))
+ return 1
+ }
+ rules := buf.String()
+
+ if err := client.Sys().PutPolicy(name, rules); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error: %s", err))
+ return 1
+ }
+
+ c.Ui.Output(fmt.Sprintf("Policy '%s' written.", name))
+ return 0
+}
+
+func (c *PolicyWriteCommand) Synopsis() string {
+ return "Write a policy to the server"
+}
+
+func (c *PolicyWriteCommand) Help() string {
+ helpText := `
+Usage: vault policy-write [options] name path
+
+ Write a policy with the given name from the contents of a file or stdin.
+
+ If the path is "-", the policy is read from stdin. Otherwise, it is
+ loaded from the file at the given path.
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
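
The command only shuttles bytes from a file or stdin into `client.Sys().PutPolicy`. A minimal sketch with an inline rule set (the policy name and rules are illustrative):

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

const rules = `
path "secret/*" {
	policy = "read"
}
`

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent to: vault policy-write foo policy.hcl
	if err := client.Sys().PutPolicy("foo", rules); err != nil {
		log.Fatal(err)
	}
}
```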
diff --git a/vendor/github.com/hashicorp/vault/command/policy_write_test.go b/vendor/github.com/hashicorp/vault/command/policy_write_test.go
new file mode 100644
index 0000000..d0deeaa
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/policy_write_test.go
@@ -0,0 +1,33 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestPolicyWrite(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &PolicyWriteCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "foo",
+ "./test-fixtures/policy.hcl",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/read.go b/vendor/github.com/hashicorp/vault/command/read.go
new file mode 100644
index 0000000..6e9c4d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/read.go
@@ -0,0 +1,97 @@
+package command
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/meta"
+)
+
+// ReadCommand is a Command that reads data from the Vault.
+type ReadCommand struct {
+ meta.Meta
+}
+
+func (c *ReadCommand) Run(args []string) int {
+ var format string
+ var field string
+ var err error
+ var secret *api.Secret
+ var flags *flag.FlagSet
+ flags = c.Meta.FlagSet("read", meta.FlagSetDefault)
+ flags.StringVar(&format, "format", "table", "")
+ flags.StringVar(&field, "field", "", "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 1 || len(args[0]) == 0 {
+ c.Ui.Error("read expects one argument")
+ flags.Usage()
+ return 1
+ }
+
+ path := args[0]
+ if path[0] == '/' {
+ path = path[1:]
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ secret, err = client.Logical().Read(path)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error reading %s: %s", path, err))
+ return 1
+ }
+ if secret == nil {
+ c.Ui.Error(fmt.Sprintf(
+ "No value found at %s", path))
+ return 1
+ }
+
+ // Handle single field output
+ if field != "" {
+ return PrintRawField(c.Ui, secret, field)
+ }
+
+ return OutputSecret(c.Ui, format, secret)
+}
+
+func (c *ReadCommand) Synopsis() string {
+ return "Read data or secrets from Vault"
+}
+
+func (c *ReadCommand) Help() string {
+ helpText := `
+Usage: vault read [options] path
+
+ Read data from Vault.
+
+ Reads data at the given path from Vault. This can be used to read
+ secrets and configuration as well as generate dynamic values from
+ materialized backends. Please reference the documentation for the
+ backends in use to determine key structure.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Read Options:
+
+ -format=table The format for output. By default it is a whitespace-
+ delimited table. This can also be json or yaml.
+
+ -field=field If included, the raw value of the specified field
+                     will be output to stdout.
+
+`
+ return strings.TrimSpace(helpText)
+}
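
Reading and single-field extraction both start from the same `api.Secret`. A minimal sketch mirroring `vault read -field=value secret/foo`, with that path assumed to hold a "value" key:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Read("secret/foo")
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		log.Fatal("no value found at secret/foo")
	}
	// Data holds the raw fields; -field=value would print just this.
	fmt.Println(secret.Data["value"])
}
```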
diff --git a/vendor/github.com/hashicorp/vault/command/read_test.go b/vendor/github.com/hashicorp/vault/command/read_test.go
new file mode 100644
index 0000000..5cf0f08
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/read_test.go
@@ -0,0 +1,137 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestRead(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &ReadCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "sys/mounts",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestRead_notFound(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &ReadCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "secret/nope",
+ }
+ if code := c.Run(args); code != 1 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestRead_field(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &ReadCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "-field", "value",
+ "secret/foo",
+ }
+
+ // Run once so the client is setup, ignore errors
+ c.Run(args)
+
+ // Get the client so we can write data
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ data := map[string]interface{}{"value": "bar"}
+ if _, err := client.Logical().Write("secret/foo", data); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Run the read
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ output := ui.OutputWriter.String()
+ if output != "bar\n" {
+ t.Fatalf("unexpectd output:\n%s", output)
+ }
+}
+
+func TestRead_field_notFound(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &ReadCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "-field", "nope",
+ "secret/foo",
+ }
+
+ // Run once so the client is setup, ignore errors
+ c.Run(args)
+
+ // Get the client so we can write data
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ data := map[string]interface{}{"value": "bar"}
+ if _, err := client.Logical().Write("secret/foo", data); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Run the read
+ if code := c.Run(args); code != 1 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/rekey.go b/vendor/github.com/hashicorp/vault/command/rekey.go
new file mode 100644
index 0000000..16022be
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/rekey.go
@@ -0,0 +1,420 @@
+package command
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/password"
+ "github.com/hashicorp/vault/helper/pgpkeys"
+ "github.com/hashicorp/vault/meta"
+)
+
+// RekeyCommand is a Command that rekeys the vault.
+type RekeyCommand struct {
+ meta.Meta
+
+ // Key can be used to pre-seed the key. If it is set, it will not
+ // be asked with the `password` helper.
+ Key string
+
+ // The nonce for the rekey request to send along
+ Nonce string
+
+ // Whether to use the recovery key instead of barrier key, if available
+ RecoveryKey bool
+}
+
+func (c *RekeyCommand) Run(args []string) int {
+ var init, cancel, status, delete, retrieve, backup, recoveryKey bool
+ var shares, threshold int
+ var nonce string
+ var pgpKeys pgpkeys.PubKeyFilesFlag
+ flags := c.Meta.FlagSet("rekey", meta.FlagSetDefault)
+ flags.BoolVar(&init, "init", false, "")
+ flags.BoolVar(&cancel, "cancel", false, "")
+ flags.BoolVar(&status, "status", false, "")
+ flags.BoolVar(&delete, "delete", false, "")
+ flags.BoolVar(&retrieve, "retrieve", false, "")
+ flags.BoolVar(&backup, "backup", false, "")
+ flags.BoolVar(&recoveryKey, "recovery-key", c.RecoveryKey, "")
+ flags.IntVar(&shares, "key-shares", 5, "")
+ flags.IntVar(&threshold, "key-threshold", 3, "")
+ flags.StringVar(&nonce, "nonce", "", "")
+ flags.Var(&pgpKeys, "pgp-keys", "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ if nonce != "" {
+ c.Nonce = nonce
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ // Check if we are running doing any restricted variants
+ switch {
+ case init:
+ return c.initRekey(client, shares, threshold, pgpKeys, backup, recoveryKey)
+ case cancel:
+ return c.cancelRekey(client, recoveryKey)
+ case status:
+ return c.rekeyStatus(client, recoveryKey)
+ case retrieve:
+ return c.rekeyRetrieveStored(client, recoveryKey)
+ case delete:
+ return c.rekeyDeleteStored(client, recoveryKey)
+ }
+
+ // Check if the rekey is started
+ var rekeyStatus *api.RekeyStatusResponse
+ if recoveryKey {
+ rekeyStatus, err = client.Sys().RekeyRecoveryKeyStatus()
+ } else {
+ rekeyStatus, err = client.Sys().RekeyStatus()
+ }
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error reading rekey status: %s", err))
+ return 1
+ }
+
+ // Start the rekey process if not started
+ if !rekeyStatus.Started {
+ if recoveryKey {
+ rekeyStatus, err = client.Sys().RekeyRecoveryKeyInit(&api.RekeyInitRequest{
+ SecretShares: shares,
+ SecretThreshold: threshold,
+ PGPKeys: pgpKeys,
+ Backup: backup,
+ })
+ } else {
+ rekeyStatus, err = client.Sys().RekeyInit(&api.RekeyInitRequest{
+ SecretShares: shares,
+ SecretThreshold: threshold,
+ PGPKeys: pgpKeys,
+ Backup: backup,
+ })
+ }
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error initializing rekey: %s", err))
+ return 1
+ }
+ c.Nonce = rekeyStatus.Nonce
+ }
+
+ shares = rekeyStatus.N
+ threshold = rekeyStatus.T
+ serverNonce := rekeyStatus.Nonce
+
+ // Get the unseal key
+ args = flags.Args()
+ key := c.Key
+ if len(args) > 0 {
+ key = args[0]
+ }
+ if key == "" {
+ c.Nonce = serverNonce
+ fmt.Printf("Rekey operation nonce: %s\n", serverNonce)
+ fmt.Printf("Key (will be hidden): ")
+ key, err = password.Read(os.Stdin)
+ fmt.Printf("\n")
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error attempting to ask for password. The raw error message\n"+
+ "is shown below, but the most common reason for this error is\n"+
+ "that you attempted to pipe a value into unseal or you're\n"+
+ "executing `vault rekey` from outside of a terminal.\n\n"+
+ "You should use `vault rekey` from a terminal for maximum\n"+
+ "security. If this isn't an option, the unseal key can be passed\n"+
+ "in using the first parameter.\n\n"+
+ "Raw error: %s", err))
+ return 1
+ }
+ }
+
+ // Provide the key, this may potentially complete the update
+ var result *api.RekeyUpdateResponse
+ if recoveryKey {
+ result, err = client.Sys().RekeyRecoveryKeyUpdate(strings.TrimSpace(key), c.Nonce)
+ } else {
+ result, err = client.Sys().RekeyUpdate(strings.TrimSpace(key), c.Nonce)
+ }
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error attempting rekey update: %s", err))
+ return 1
+ }
+
+ // If we are not complete, then dump the status
+ if !result.Complete {
+ return c.rekeyStatus(client, recoveryKey)
+ }
+
+ // Space between the key prompt, if any, and the output
+ c.Ui.Output("\n")
+ // Provide the keys
+ var haveB64 bool
+ if result.KeysB64 != nil && len(result.KeysB64) == len(result.Keys) {
+ haveB64 = true
+ }
+ for i, key := range result.Keys {
+ if len(result.PGPFingerprints) > 0 {
+ if haveB64 {
+ c.Ui.Output(fmt.Sprintf("Key %d fingerprint: %s; value: %s", i+1, result.PGPFingerprints[i], result.KeysB64[i]))
+ } else {
+ c.Ui.Output(fmt.Sprintf("Key %d fingerprint: %s; value: %s", i+1, result.PGPFingerprints[i], key))
+ }
+ } else {
+ if haveB64 {
+ c.Ui.Output(fmt.Sprintf("Key %d: %s", i+1, result.KeysB64[i]))
+ } else {
+ c.Ui.Output(fmt.Sprintf("Key %d: %s", i+1, key))
+ }
+ }
+ }
+
+ c.Ui.Output(fmt.Sprintf("\nOperation nonce: %s", result.Nonce))
+
+ if len(result.PGPFingerprints) > 0 && result.Backup {
+ c.Ui.Output(fmt.Sprintf(
+ "\n" +
+ "The encrypted unseal keys have been backed up to \"core/unseal-keys-backup\"\n" +
+ "in your physical backend. It is your responsibility to remove these if and\n" +
+ "when desired.",
+ ))
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "\n"+
+ "Vault rekeyed with %d keys and a key threshold of %d. Please\n"+
+ "securely distribute the above keys. When the vault is re-sealed,\n"+
+ "restarted, or stopped, you must provide at least %d of these keys\n"+
+ "to unseal it again.\n\n"+
+ "Vault does not store the master key. Without at least %d keys,\n"+
+ "your vault will remain permanently sealed.",
+ shares,
+ threshold,
+ threshold,
+ threshold,
+ ))
+
+ return 0
+}
+
+// initRekey is used to start the rekey process
+func (c *RekeyCommand) initRekey(client *api.Client,
+ shares, threshold int,
+ pgpKeys pgpkeys.PubKeyFilesFlag,
+ backup, recoveryKey bool) int {
+ // Start the rekey
+ request := &api.RekeyInitRequest{
+ SecretShares: shares,
+ SecretThreshold: threshold,
+ PGPKeys: pgpKeys,
+ Backup: backup,
+ }
+ var status *api.RekeyStatusResponse
+ var err error
+ if recoveryKey {
+ status, err = client.Sys().RekeyRecoveryKeyInit(request)
+ } else {
+ status, err = client.Sys().RekeyInit(request)
+ }
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error initializing rekey: %s", err))
+ return 1
+ }
+
+ if pgpKeys == nil || len(pgpKeys) == 0 {
+ c.Ui.Output(`
+WARNING: If you lose the keys after they are returned to you, there is no
+recovery. Consider using the '-pgp-keys' option to protect the returned unseal
+keys along with '-backup=true' to allow recovery of the encrypted keys in case
+of emergency. They can easily be deleted at a later time with
+'vault rekey -delete'.
+`)
+ }
+
+ if pgpKeys != nil && len(pgpKeys) > 0 && !backup {
+ c.Ui.Output(`
+WARNING: You are using PGP keys for encryption, but have not set the option to
+back up the new unseal keys to physical storage. If you lose the keys after
+they are returned to you, there is no recovery. Consider setting '-backup=true'
+to allow recovery of the encrypted keys in case of emergency. They can easily
+be deleted at a later time with 'vault rekey -delete'.
+`)
+ }
+
+ // Provide the current status
+ return c.dumpRekeyStatus(status)
+}
+
+// cancelRekey is used to abort the rekey process
+func (c *RekeyCommand) cancelRekey(client *api.Client, recovery bool) int {
+ var err error
+ if recovery {
+ err = client.Sys().RekeyRecoveryKeyCancel()
+ } else {
+ err = client.Sys().RekeyCancel()
+ }
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Failed to cancel rekey: %s", err))
+ return 1
+ }
+ c.Ui.Output("Rekey canceled.")
+ return 0
+}
+
+// rekeyStatus is used just to fetch and dump the status
+func (c *RekeyCommand) rekeyStatus(client *api.Client, recovery bool) int {
+ // Check the status
+ var status *api.RekeyStatusResponse
+ var err error
+ if recovery {
+ status, err = client.Sys().RekeyRecoveryKeyStatus()
+ } else {
+ status, err = client.Sys().RekeyStatus()
+ }
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error reading rekey status: %s", err))
+ return 1
+ }
+
+ return c.dumpRekeyStatus(status)
+}
+
+func (c *RekeyCommand) dumpRekeyStatus(status *api.RekeyStatusResponse) int {
+ // Dump the status
+ statString := fmt.Sprintf(
+ "Nonce: %s\n"+
+ "Started: %t\n"+
+ "Key Shares: %d\n"+
+ "Key Threshold: %d\n"+
+ "Rekey Progress: %d\n"+
+ "Required Keys: %d",
+ status.Nonce,
+ status.Started,
+ status.N,
+ status.T,
+ status.Progress,
+ status.Required,
+ )
+ if len(status.PGPFingerprints) != 0 {
+ statString = fmt.Sprintf("%s\nPGP Key Fingerprints: %s", statString, status.PGPFingerprints)
+ statString = fmt.Sprintf("%s\nBackup Storage: %t", statString, status.Backup)
+ }
+ c.Ui.Output(statString)
+ return 0
+}
+
+func (c *RekeyCommand) rekeyRetrieveStored(client *api.Client, recovery bool) int {
+ var storedKeys *api.RekeyRetrieveResponse
+ var err error
+ if recovery {
+ storedKeys, err = client.Sys().RekeyRetrieveRecoveryBackup()
+ } else {
+ storedKeys, err = client.Sys().RekeyRetrieveBackup()
+ }
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error retrieving stored keys: %s", err))
+ return 1
+ }
+
+ secret := &api.Secret{
+ Data: structs.New(storedKeys).Map(),
+ }
+
+ return OutputSecret(c.Ui, "table", secret)
+}
+
+func (c *RekeyCommand) rekeyDeleteStored(client *api.Client, recovery bool) int {
+ var err error
+ if recovery {
+ err = client.Sys().RekeyDeleteRecoveryBackup()
+ } else {
+ err = client.Sys().RekeyDeleteBackup()
+ }
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Failed to delete stored keys: %s", err))
+ return 1
+ }
+ c.Ui.Output("Stored keys deleted.")
+ return 0
+}
+
+func (c *RekeyCommand) Synopsis() string {
+ return "Rekeys Vault to generate new unseal keys"
+}
+
+func (c *RekeyCommand) Help() string {
+ helpText := `
+Usage: vault rekey [options] [key]
+
+ Rekey is used to change the unseal keys. This can be done to generate
+ a new set of unseal keys or to change the number of shares and the
+ required threshold.
+
+ Rekey can only be done when the vault is already unsealed. The operation
+ is done online, but requires that a threshold of the current unseal
+ keys be provided.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Rekey Options:
+
+ -init Initialize the rekey operation by setting the desired
+ number of shares and the key threshold. This can only be
+ done if no rekey is already initiated.
+
+ -cancel Reset the rekey process by throwing away
+ prior keys and the rekey configuration.
+
+ -status Prints the status of the current rekey operation.
+ This can be used to see the status without attempting
+ to provide an unseal key.
+
+ -retrieve Retrieve backed-up keys. Only available if the PGP keys
+ were provided and the backup has not been deleted.
+
+ -delete Delete any backed-up keys.
+
+ -key-shares=5 The number of key shares to split the master key
+ into.
+
+ -key-threshold=3 The number of key shares required to reconstruct
+ the master key.
+
+ -nonce=abcd The nonce provided at rekey initialization time. This
+ same nonce value must be provided with each unseal
+ key. If the unseal key is not being passed in via the
+ the command line the nonce parameter is not required,
+ and will instead be displayed with the key prompt.
+
+ -pgp-keys If provided, must be a comma-separated list of
+ files on disk containing binary- or base64-format
+ public PGP keys, or Keybase usernames specified as
+ "keybase:". The number of given entries
+ must match 'key-shares'. The output unseal keys will
+ be encrypted and base64-encoded, in order, with the
+ given public keys. If you want to use them with the
+ 'vault unseal' command, you will need to base64-decode
+ and decrypt; this will be the plaintext unseal key.
+
+ -backup=false If true, and if the key shares are PGP-encrypted, a
+ plaintext backup of the PGP-encrypted keys will be
+ stored at "core/unseal-keys-backup" in your physical
+ storage. You can retrieve or delete them via the
+ 'sys/rekey/backup' endpoint.
+
+ -recovery-key=false Whether to rekey the recovery key instead of the
+ barrier key. Only used with Vault HSM.
+`
+ return strings.TrimSpace(helpText)
+}
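
The interactive flow above — init, then feeding a threshold of the current unseal keys until the update reports Complete — can be scripted against the API. A minimal sketch; the placeholder key strings stand for real unseal keys gathered out of band from their holders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent to: vault rekey -init -key-shares=5 -key-threshold=3
	status, err := client.Sys().RekeyInit(&api.RekeyInitRequest{
		SecretShares:    5,
		SecretThreshold: 3,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Placeholders: a threshold of the *current* unseal keys.
	currentKeys := []string{"<unseal key 1>", "<unseal key 2>", "<unseal key 3>"}
	for _, key := range currentKeys {
		result, err := client.Sys().RekeyUpdate(key, status.Nonce)
		if err != nil {
			log.Fatal(err)
		}
		if result.Complete {
			for i, newKey := range result.Keys {
				fmt.Printf("Key %d: %s\n", i+1, newKey)
			}
			break
		}
	}
}
```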
diff --git a/vendor/github.com/hashicorp/vault/command/rekey_test.go b/vendor/github.com/hashicorp/vault/command/rekey_test.go
new file mode 100644
index 0000000..21e4e24
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/rekey_test.go
@@ -0,0 +1,311 @@
+package command
+
+import (
+ "encoding/hex"
+ "os"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestRekey(t *testing.T) {
+ core, keys, _ := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+
+ for i, key := range keys {
+ c := &RekeyCommand{
+ Key: hex.EncodeToString(key),
+ RecoveryKey: false,
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ if i > 0 {
+ conf, err := core.RekeyConfig(false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c.Nonce = conf.Nonce
+ }
+
+ args := []string{"-address", addr}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+ }
+
+ config, err := core.SealAccess().BarrierConfig()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if config.SecretShares != 5 {
+ t.Fatal("should rekey")
+ }
+}
+
+func TestRekey_arg(t *testing.T) {
+ core, keys, _ := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+
+ for i, key := range keys {
+ c := &RekeyCommand{
+ RecoveryKey: false,
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ if i > 0 {
+ conf, err := core.RekeyConfig(false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c.Nonce = conf.Nonce
+ }
+
+ args := []string{"-address", addr, hex.EncodeToString(key)}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+ }
+
+ config, err := core.SealAccess().BarrierConfig()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if config.SecretShares != 5 {
+ t.Fatal("should rekey")
+ }
+}
+
+func TestRekey_init(t *testing.T) {
+ core, _, _ := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+
+ c := &RekeyCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "-init",
+ "-key-threshold", "10",
+ "-key-shares", "10",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ config, err := core.RekeyConfig(false)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if config.SecretShares != 10 {
+ t.Fatal("should rekey")
+ }
+ if config.SecretThreshold != 10 {
+ t.Fatal("should rekey")
+ }
+}
+
+func TestRekey_cancel(t *testing.T) {
+ core, keys, _ := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &RekeyCommand{
+ Key: hex.EncodeToString(keys[0]),
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ args := []string{"-address", addr, "-init"}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ args = []string{"-address", addr, "-cancel"}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ config, err := core.RekeyConfig(false)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if config != nil {
+ t.Fatal("should not rekey")
+ }
+}
+
+func TestRekey_status(t *testing.T) {
+ core, keys, _ := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &RekeyCommand{
+ Key: hex.EncodeToString(keys[0]),
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ args := []string{"-address", addr, "-init"}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ args = []string{"-address", addr, "-status"}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+	if !strings.Contains(ui.OutputWriter.String(), "Started: true") {
+ t.Fatalf("bad: %s", ui.OutputWriter.String())
+ }
+}
+
+func TestRekey_init_pgp(t *testing.T) {
+ core, keys, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ bc := &logical.BackendConfig{
+ Logger: nil,
+ System: logical.StaticSystemView{
+ DefaultLeaseTTLVal: time.Hour * 24,
+ MaxLeaseTTLVal: time.Hour * 24 * 32,
+ },
+ }
+ sysBackend, err := vault.NewSystemBackend(core, bc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ui := new(cli.MockUi)
+ c := &RekeyCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ tempDir, pubFiles, err := getPubKeyFiles(t)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ args := []string{
+ "-address", addr,
+ "-init",
+ "-key-shares", "4",
+ "-pgp-keys", pubFiles[0] + ",@" + pubFiles[1] + "," + pubFiles[2] + "," + pubFiles[3],
+ "-key-threshold", "2",
+ "-backup", "true",
+ }
+
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ config, err := core.RekeyConfig(false)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if config.SecretShares != 4 {
+ t.Fatal("should rekey")
+ }
+ if config.SecretThreshold != 2 {
+ t.Fatal("should rekey")
+ }
+
+ for _, key := range keys {
+ c = &RekeyCommand{
+ Key: hex.EncodeToString(key),
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ c.Nonce = config.Nonce
+
+ args = []string{
+ "-address", addr,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+ }
+
+ type backupStruct struct {
+ Keys map[string][]string
+ KeysB64 map[string][]string
+ }
+ backupVals := &backupStruct{}
+
+ req := logical.TestRequest(t, logical.ReadOperation, "rekey/backup")
+ resp, err := sysBackend.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("error running backed-up unseal key fetch: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("got nil resp with unseal key fetch")
+ }
+ if resp.Data["keys"] == nil {
+ t.Fatalf("could not retrieve unseal keys from token")
+ }
+ if resp.Data["nonce"] != config.Nonce {
+ t.Fatalf("nonce mismatch between rekey and backed-up keys")
+ }
+
+ backupVals.Keys = resp.Data["keys"].(map[string][]string)
+ backupVals.KeysB64 = resp.Data["keys_base64"].(map[string][]string)
+
+ // Now delete and try again; the values should be inaccessible
+ req = logical.TestRequest(t, logical.DeleteOperation, "rekey/backup")
+ resp, err = sysBackend.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("error running backed-up unseal key delete: %v", err)
+ }
+ req = logical.TestRequest(t, logical.ReadOperation, "rekey/backup")
+ resp, err = sysBackend.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("error running backed-up unseal key fetch: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("got nil resp with unseal key fetch")
+ }
+ if resp.Data["keys"] != nil {
+ t.Fatalf("keys found when they should have been deleted")
+ }
+
+ // Sort, because it'll be tested with DeepEqual later
+	for k := range backupVals.Keys {
+ sort.Strings(backupVals.Keys[k])
+ sort.Strings(backupVals.KeysB64[k])
+ }
+
+ parseDecryptAndTestUnsealKeys(t, ui.OutputWriter.String(), token, true, backupVals.Keys, backupVals.KeysB64, core)
+}
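
A minimal sketch of the same rekey flow the tests above drive through the CLI, expressed directly against the hashicorp/vault/api client; the address and the hex-encoded unseal keys are placeholders, and error handling is abbreviated:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Placeholder address; a real deployment would also configure TLS.
	client, err := api.NewClient(&api.Config{Address: "http://127.0.0.1:8200"})
	if err != nil {
		log.Fatal(err)
	}

	// Start a rekey: five new shares, any three of which rebuild the key.
	status, err := client.Sys().RekeyInit(&api.RekeyInitRequest{
		SecretShares:    5,
		SecretThreshold: 3,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Submit existing unseal keys (placeholders here); the nonce binds each
	// submission to this particular rekey attempt, as in TestRekey_arg.
	for _, key := range []string{"hex-key-1", "hex-key-2", "hex-key-3"} {
		resp, err := client.Sys().RekeyUpdate(key, status.Nonce)
		if err != nil {
			log.Fatal(err)
		}
		if resp.Complete {
			fmt.Printf("rekey complete: %d new shares issued\n", len(resp.Keys))
			break
		}
	}
}
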
diff --git a/vendor/github.com/hashicorp/vault/command/remount.go b/vendor/github.com/hashicorp/vault/command/remount.go
new file mode 100644
index 0000000..a6defa7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/remount.go
@@ -0,0 +1,74 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// RemountCommand is a Command that remounts a mounted secret backend
+// to a new endpoint.
+type RemountCommand struct {
+ meta.Meta
+}
+
+func (c *RemountCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("remount", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 2 {
+ flags.Usage()
+		c.Ui.Error("\nremount expects two arguments: the 'from' path and the 'to' path")
+ return 1
+ }
+
+ from := args[0]
+ to := args[1]
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ if err := client.Sys().Remount(from, to); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+			"Remount error: %s", err))
+ return 2
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "Successfully remounted from '%s' to '%s'!", from, to))
+
+ return 0
+}
+
+func (c *RemountCommand) Synopsis() string {
+ return "Remount a secret backend to a new path"
+}
+
+func (c *RemountCommand) Help() string {
+ helpText := `
+Usage: vault remount [options] from to
+
+ Remount a mounted secret backend to a new path.
+
+ This command remounts a secret backend that is already mounted to
+ a new path. All the secrets from the old path will be revoked, but
+ the data associated with the backend (such as configuration), will
+  the data associated with the backend (such as configuration) will
+
+ Example: vault remount secret/ generic/
+
+General Options:
+` + meta.GeneralOptionsUsage()
+
+ return strings.TrimSpace(helpText)
+}
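
RemountCommand boils down to a single API call. A minimal sketch using the hashicorp/vault/api client; the address, token, and mount paths are illustrative placeholders:

package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(&api.Config{Address: "http://127.0.0.1:8200"})
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("placeholder-root-token")

	// Move the backend mounted at "secret/" to "generic/": configuration and
	// data travel with it, but outstanding leases are revoked.
	if err := client.Sys().Remount("secret/", "generic/"); err != nil {
		log.Fatalf("remount failed: %v", err)
	}
}
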
diff --git a/vendor/github.com/hashicorp/vault/command/remount_test.go b/vendor/github.com/hashicorp/vault/command/remount_test.go
new file mode 100644
index 0000000..0d6f191
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/remount_test.go
@@ -0,0 +1,52 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestRemount(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &RemountCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "secret/", "generic",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ mounts, err := client.Sys().ListMounts()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ _, ok := mounts["secret/"]
+ if ok {
+ t.Fatal("should not have mount")
+ }
+
+ _, ok = mounts["generic/"]
+ if !ok {
+ t.Fatal("should have generic")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/renew.go b/vendor/github.com/hashicorp/vault/command/renew.go
new file mode 100644
index 0000000..6a3eafe
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/renew.go
@@ -0,0 +1,90 @@
+package command
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// RenewCommand is a Command that renews the lease of a secret.
+type RenewCommand struct {
+ meta.Meta
+}
+
+func (c *RenewCommand) Run(args []string) int {
+ var format string
+ flags := c.Meta.FlagSet("renew", meta.FlagSetDefault)
+ flags.StringVar(&format, "format", "table", "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+	if len(args) < 1 || len(args) > 2 {
+ flags.Usage()
+		c.Ui.Error("\nrenew expects at least one argument: the lease ID to renew")
+ return 1
+ }
+
+ var increment int
+ leaseId := args[0]
+ if len(args) > 1 {
+ parsed, err := strconv.ParseInt(args[1], 10, 0)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Invalid increment, must be an int: %s", err))
+ return 1
+ }
+
+ increment = int(parsed)
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ secret, err := client.Sys().Renew(leaseId, increment)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Renew error: %s", err))
+ return 1
+ }
+
+ return OutputSecret(c.Ui, format, secret)
+}
+
+func (c *RenewCommand) Synopsis() string {
+ return "Renew the lease of a secret"
+}
+
+func (c *RenewCommand) Help() string {
+ helpText := `
+Usage: vault renew [options] id [increment]
+
+ Renew the lease on a secret, extending the time that it can be used
+ before it is revoked by Vault.
+
+ Every secret in Vault has a lease associated with it. If the user of
+ the secret wants to use it longer than the lease, then it must be
+ renewed. Renewing the lease will not change the contents of the secret.
+
+ To renew a secret, run this command with the lease ID returned when it
+ was read. Optionally, request a specific increment in seconds. Vault
+ is not required to honor this request.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Renew Options:
+
+ -format=table The format for output. By default it is a whitespace-
+ delimited table. This can also be json or yaml.
+`
+ return strings.TrimSpace(helpText)
+}
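
The renew path is likewise a thin wrapper around one API method. A sketch of the underlying call, assuming an already-configured *api.Client; the lease ID and the 300-second increment are placeholders:

package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

// renewLease wraps the single call RenewCommand makes: renew the lease and
// request (without any guarantee of being granted) a 300-second increment.
func renewLease(client *api.Client, leaseID string) {
	secret, err := client.Sys().Renew(leaseID, 300)
	if err != nil {
		log.Fatalf("renew failed: %v", err)
	}
	log.Printf("lease %s now valid for %ds", leaseID, secret.LeaseDuration)
}
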
diff --git a/vendor/github.com/hashicorp/vault/command/renew_test.go b/vendor/github.com/hashicorp/vault/command/renew_test.go
new file mode 100644
index 0000000..d43e516
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/renew_test.go
@@ -0,0 +1,107 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestRenew(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &RenewCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ // write a secret with a lease
+ client := testClient(t, addr, token)
+ _, err := client.Logical().Write("secret/foo", map[string]interface{}{
+ "key": "value",
+ "lease": "1m",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // read the secret to get its lease ID
+ secret, err := client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ args := []string{
+ "-address", addr,
+ secret.LeaseID,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestRenewBothWays(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ // write a secret with a lease
+ client := testClient(t, addr, token)
+ _, err := client.Logical().Write("secret/foo", map[string]interface{}{
+ "key": "value",
+ "ttl": "1m",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // read the secret to get its lease ID
+ secret, err := client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Test one renew path
+ r := client.NewRequest("PUT", "/v1/sys/renew")
+ body := map[string]interface{}{
+ "lease_id": secret.LeaseID,
+ }
+ if err := r.SetJSONBody(body); err != nil {
+ t.Fatal(err)
+ }
+ resp, err := client.RawRequest(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close()
+ secret, err = api.ParseSecret(resp.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret.LeaseDuration != 60 {
+ t.Fatal("bad lease duration")
+ }
+
+ // Test the other
+ r = client.NewRequest("PUT", "/v1/sys/renew/"+secret.LeaseID)
+ resp, err = client.RawRequest(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close()
+ secret, err = api.ParseSecret(resp.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret.LeaseDuration != 60 {
+ t.Fatalf("bad lease duration; secret is %#v\n", *secret)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/revoke.go b/vendor/github.com/hashicorp/vault/command/revoke.go
new file mode 100644
index 0000000..50933ad
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/revoke.go
@@ -0,0 +1,95 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// RevokeCommand is a Command that revokes a secret by its lease ID.
+type RevokeCommand struct {
+ meta.Meta
+}
+
+func (c *RevokeCommand) Run(args []string) int {
+ var prefix, force bool
+ flags := c.Meta.FlagSet("revoke", meta.FlagSetDefault)
+ flags.BoolVar(&prefix, "prefix", false, "")
+ flags.BoolVar(&force, "force", false, "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 1 {
+ flags.Usage()
+		c.Ui.Error("\nrevoke expects one argument: the lease ID to revoke")
+ return 1
+ }
+ leaseId := args[0]
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ switch {
+ case force && !prefix:
+		c.Ui.Error("-force requires -prefix")
+ return 1
+ case force && prefix:
+ err = client.Sys().RevokeForce(leaseId)
+ case prefix:
+ err = client.Sys().RevokePrefix(leaseId)
+ default:
+ err = client.Sys().Revoke(leaseId)
+ }
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Revoke error: %s", err))
+ return 1
+ }
+
+ c.Ui.Output(fmt.Sprintf("Success! Revoked the secret with ID '%s', if it existed.", leaseId))
+ return 0
+}
+
+func (c *RevokeCommand) Synopsis() string {
+ return "Revoke a secret."
+}
+
+func (c *RevokeCommand) Help() string {
+ helpText := `
+Usage: vault revoke [options] id
+
+ Revoke a secret by its lease ID.
+
+ This command revokes a secret by its lease ID that was returned with it. Once
+ the key is revoked, it is no longer valid.
+
+ With the -prefix flag, the revoke is done by prefix: any secret prefixed with
+ the given partial ID is revoked. Lease IDs are structured in such a way to
+  the given partial ID is revoked. Lease IDs are structured in such a way as
+  to make revocation by prefix useful.
+ With the -force flag, the lease is removed from Vault even if the revocation
+ fails. This is meant for certain recovery scenarios and should not be used
+ lightly. This option requires -prefix.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Revoke Options:
+
+ -prefix=true Revoke all secrets with the matching prefix. This
+ defaults to false: an exact revocation.
+
+ -force=true Delete the lease even if the actual revocation
+ operation fails.
+`
+ return strings.TrimSpace(helpText)
+}
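
The three revocation modes map onto three API methods, mirroring the switch above. A sketch assuming a configured *api.Client:

package main

import "github.com/hashicorp/vault/api"

// revoke mirrors the switch in RevokeCommand.Run: exact revocation by
// default, prefix revocation with -prefix, and forced lease removal
// (revocation errors ignored) with -prefix -force.
func revoke(client *api.Client, id string, prefix, force bool) error {
	switch {
	case force && prefix:
		return client.Sys().RevokeForce(id)
	case prefix:
		return client.Sys().RevokePrefix(id)
	default:
		return client.Sys().Revoke(id)
	}
}
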
diff --git a/vendor/github.com/hashicorp/vault/command/revoke_test.go b/vendor/github.com/hashicorp/vault/command/revoke_test.go
new file mode 100644
index 0000000..cb9febf
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/revoke_test.go
@@ -0,0 +1,46 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestRevoke(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &RevokeCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ client := testClient(t, addr, token)
+ _, err := client.Logical().Write("secret/foo", map[string]interface{}{
+ "key": "value",
+ "lease": "1m",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ secret, err := client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ args := []string{
+ "-address", addr,
+ secret.LeaseID,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/rotate.go b/vendor/github.com/hashicorp/vault/command/rotate.go
new file mode 100644
index 0000000..9da3873
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/rotate.go
@@ -0,0 +1,67 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// RotateCommand is a Command that rotates the backend encryption key used to persist data.
+type RotateCommand struct {
+ meta.Meta
+}
+
+func (c *RotateCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("rotate", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ // Rotate the key
+ err = client.Sys().Rotate()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error with key rotation: %s", err))
+ return 2
+ }
+
+ // Print the key status
+ status, err := client.Sys().KeyStatus()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+			"Error reading key status: %s", err))
+ return 2
+ }
+
+ c.Ui.Output(fmt.Sprintf("Key Term: %d", status.Term))
+ c.Ui.Output(fmt.Sprintf("Installation Time: %v", status.InstallTime))
+ return 0
+}
+
+func (c *RotateCommand) Synopsis() string {
+ return "Rotates the backend encryption key used to persist data"
+}
+
+func (c *RotateCommand) Help() string {
+ helpText := `
+Usage: vault rotate [options]
+
+ Rotates the backend encryption key which is used to secure data
+ written to the storage backend. This is done by installing a new key
+ which encrypts new data, while old keys are still used to decrypt
+ secrets written previously. This is an online operation and is not
+ disruptive.
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
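
A sketch of the two calls RotateCommand chains, assuming a configured *api.Client:

package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

// rotate installs a new encryption key term, then reports which term is
// active and when it was installed, just as the command output does.
func rotate(client *api.Client) {
	if err := client.Sys().Rotate(); err != nil {
		log.Fatalf("rotate failed: %v", err)
	}
	status, err := client.Sys().KeyStatus()
	if err != nil {
		log.Fatalf("key status failed: %v", err)
	}
	log.Printf("key term %d installed at %v", status.Term, status.InstallTime)
}
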
diff --git a/vendor/github.com/hashicorp/vault/command/rotate_test.go b/vendor/github.com/hashicorp/vault/command/rotate_test.go
new file mode 100644
index 0000000..257f280
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/rotate_test.go
@@ -0,0 +1,31 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestRotate(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &RotateCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/seal.go b/vendor/github.com/hashicorp/vault/command/seal.go
new file mode 100644
index 0000000..033c164
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/seal.go
@@ -0,0 +1,63 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// SealCommand is a Command that seals the vault.
+type SealCommand struct {
+ meta.Meta
+}
+
+func (c *SealCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("seal", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ if err := client.Sys().Seal(); err != nil {
+ c.Ui.Error(fmt.Sprintf("Error sealing: %s", err))
+ return 1
+ }
+
+ c.Ui.Output("Vault is now sealed.")
+ return 0
+}
+
+func (c *SealCommand) Synopsis() string {
+ return "Seals the Vault server"
+}
+
+func (c *SealCommand) Help() string {
+ helpText := `
+Usage: vault seal [options]
+
+ Seal the vault.
+
+ Sealing a vault tells the Vault server to stop responding to any
+  access operations until it is unsealed again. A sealed vault discards its
+  in-memory master key, without which its data cannot be decrypted, so it is
+  physically blocked from responding to operations until it is unsealed with
+ the "unseal" command or via the API.
+
+  This command is idempotent; if the vault is already sealed, it does nothing.
+
+ If an unseal has started, sealing the vault will reset the unsealing
+ process. You'll have to re-enter every portion of the master key again.
+ This is the same as running "vault unseal -reset".
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
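
A sketch pairing SealCommand's API call with the unauthenticated seal-status check, assuming a configured *api.Client:

package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

// sealAndVerify performs the seal call and then confirms the result via
// the seal-status endpoint, which needs no token.
func sealAndVerify(client *api.Client) {
	if err := client.Sys().Seal(); err != nil {
		log.Fatalf("seal failed: %v", err)
	}
	status, err := client.Sys().SealStatus()
	if err != nil {
		log.Fatalf("seal-status failed: %v", err)
	}
	log.Printf("sealed=%v shares=%d threshold=%d", status.Sealed, status.N, status.T)
}
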
diff --git a/vendor/github.com/hashicorp/vault/command/seal_test.go b/vendor/github.com/hashicorp/vault/command/seal_test.go
new file mode 100644
index 0000000..c224aee
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/seal_test.go
@@ -0,0 +1,37 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func Test_Seal(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &SealCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{"-address", addr}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if !sealed {
+ t.Fatal("should be sealed")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/server.go b/vendor/github.com/hashicorp/vault/command/server.go
new file mode 100644
index 0000000..c09db3d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server.go
@@ -0,0 +1,1072 @@
+package command
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "os/signal"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "golang.org/x/net/http2"
+
+ colorable "github.com/mattn/go-colorable"
+ log "github.com/mgutz/logxi/v1"
+
+ "google.golang.org/grpc/grpclog"
+
+ "github.com/armon/go-metrics"
+ "github.com/armon/go-metrics/circonus"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/command/server"
+ "github.com/hashicorp/vault/helper/flag-slice"
+ "github.com/hashicorp/vault/helper/gated-writer"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/helper/mlock"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/vault"
+ "github.com/hashicorp/vault/version"
+)
+
+// ServerCommand is a Command that starts the Vault server.
+type ServerCommand struct {
+ AuditBackends map[string]audit.Factory
+ CredentialBackends map[string]logical.Factory
+ LogicalBackends map[string]logical.Factory
+
+ ShutdownCh chan struct{}
+ SighupCh chan struct{}
+
+ WaitGroup *sync.WaitGroup
+
+ meta.Meta
+
+ logger log.Logger
+
+ cleanupGuard sync.Once
+
+ reloadFuncsLock *sync.RWMutex
+ reloadFuncs *map[string][]vault.ReloadFunc
+}
+
+func (c *ServerCommand) Run(args []string) int {
+ var dev, verifyOnly, devHA, devTransactional bool
+ var configPath []string
+ var logLevel, devRootTokenID, devListenAddress string
+ flags := c.Meta.FlagSet("server", meta.FlagSetDefault)
+ flags.BoolVar(&dev, "dev", false, "")
+ flags.StringVar(&devRootTokenID, "dev-root-token-id", "", "")
+ flags.StringVar(&devListenAddress, "dev-listen-address", "", "")
+ flags.StringVar(&logLevel, "log-level", "info", "")
+ flags.BoolVar(&verifyOnly, "verify-only", false, "")
+ flags.BoolVar(&devHA, "ha", false, "")
+ flags.BoolVar(&devTransactional, "transactional", false, "")
+ flags.Usage = func() { c.Ui.Output(c.Help()) }
+ flags.Var((*sliceflag.StringFlag)(&configPath), "config", "config")
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ // Create a logger. We wrap it in a gated writer so that it doesn't
+ // start logging too early.
+ logGate := &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)}
+ var level int
+ logLevel = strings.ToLower(strings.TrimSpace(logLevel))
+ switch logLevel {
+ case "trace":
+ level = log.LevelTrace
+ case "debug":
+ level = log.LevelDebug
+ case "info":
+ level = log.LevelInfo
+ case "notice":
+ level = log.LevelNotice
+ case "warn":
+ level = log.LevelWarn
+ case "err":
+ level = log.LevelError
+ default:
+ c.Ui.Output(fmt.Sprintf("Unknown log level %s", logLevel))
+ return 1
+ }
+
+ logFormat := os.Getenv("VAULT_LOG_FORMAT")
+ if logFormat == "" {
+ logFormat = os.Getenv("LOGXI_FORMAT")
+ }
+ switch strings.ToLower(logFormat) {
+ case "vault", "vault_json", "vault-json", "vaultjson", "json", "":
+ c.logger = logformat.NewVaultLoggerWithWriter(logGate, level)
+ default:
+ c.logger = log.NewLogger(logGate, "vault")
+ c.logger.SetLevel(level)
+ }
+ grpclog.SetLogger(&grpclogFaker{
+ logger: c.logger,
+ })
+
+ if os.Getenv("VAULT_DEV_ROOT_TOKEN_ID") != "" && devRootTokenID == "" {
+ devRootTokenID = os.Getenv("VAULT_DEV_ROOT_TOKEN_ID")
+ }
+
+ if os.Getenv("VAULT_DEV_LISTEN_ADDRESS") != "" && devListenAddress == "" {
+ devListenAddress = os.Getenv("VAULT_DEV_LISTEN_ADDRESS")
+ }
+
+ if devHA || devTransactional {
+ dev = true
+ }
+
+ // Validation
+ if !dev {
+ switch {
+ case len(configPath) == 0:
+ c.Ui.Output("At least one config path must be specified with -config")
+ flags.Usage()
+ return 1
+ case devRootTokenID != "":
+ c.Ui.Output("Root token ID can only be specified with -dev")
+ flags.Usage()
+ return 1
+ }
+ }
+
+ // Load the configuration
+ var config *server.Config
+ if dev {
+ config = server.DevConfig(devHA, devTransactional)
+ if devListenAddress != "" {
+ config.Listeners[0].Config["address"] = devListenAddress
+ }
+ }
+ for _, path := range configPath {
+ current, err := server.LoadConfig(path, c.logger)
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf(
+ "Error loading configuration from %s: %s", path, err))
+ return 1
+ }
+
+ if config == nil {
+ config = current
+ } else {
+ config = config.Merge(current)
+ }
+ }
+
+ // Ensure at least one config was found.
+ if config == nil {
+ c.Ui.Output("No configuration files found.")
+ return 1
+ }
+
+ // Ensure that a backend is provided
+ if config.Storage == nil {
+ c.Ui.Output("A storage backend must be specified")
+ return 1
+ }
+
+ // If mlockall(2) isn't supported, show a warning. We disable this
+ // in dev because it is quite scary to see when first using Vault.
+ if !dev && !mlock.Supported() {
+ c.Ui.Output("==> WARNING: mlock not supported on this system!\n")
+ c.Ui.Output(" An `mlockall(2)`-like syscall to prevent memory from being")
+ c.Ui.Output(" swapped to disk is not supported on this system. Running")
+ c.Ui.Output(" Vault on an mlockall(2) enabled system is much more secure.\n")
+ }
+
+ if err := c.setupTelemetry(config); err != nil {
+ c.Ui.Output(fmt.Sprintf("Error initializing telemetry: %s", err))
+ return 1
+ }
+
+ // Initialize the backend
+ backend, err := physical.NewBackend(
+ config.Storage.Type, c.logger, config.Storage.Config)
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf(
+ "Error initializing storage of type %s: %s",
+ config.Storage.Type, err))
+ return 1
+ }
+
+ infoKeys := make([]string, 0, 10)
+ info := make(map[string]string)
+
+ var seal vault.Seal = &vault.DefaultSeal{}
+
+ // Ensure that the seal finalizer is called, even if using verify-only
+ defer func() {
+ if seal != nil {
+ err = seal.Finalize()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error finalizing seals: %v", err))
+ }
+ }
+ }()
+
+ if seal == nil {
+		c.Ui.Error("Could not create seal")
+ return 1
+ }
+
+ coreConfig := &vault.CoreConfig{
+ Physical: backend,
+ RedirectAddr: config.Storage.RedirectAddr,
+ HAPhysical: nil,
+ Seal: seal,
+ AuditBackends: c.AuditBackends,
+ CredentialBackends: c.CredentialBackends,
+ LogicalBackends: c.LogicalBackends,
+ Logger: c.logger,
+ DisableCache: config.DisableCache,
+ DisableMlock: config.DisableMlock,
+ MaxLeaseTTL: config.MaxLeaseTTL,
+ DefaultLeaseTTL: config.DefaultLeaseTTL,
+ ClusterName: config.ClusterName,
+ CacheSize: config.CacheSize,
+ }
+ if dev {
+ coreConfig.DevToken = devRootTokenID
+ }
+
+ var disableClustering bool
+
+ // Initialize the separate HA storage backend, if it exists
+ var ok bool
+ if config.HAStorage != nil {
+ habackend, err := physical.NewBackend(
+ config.HAStorage.Type, c.logger, config.HAStorage.Config)
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf(
+ "Error initializing HA storage of type %s: %s",
+ config.HAStorage.Type, err))
+ return 1
+ }
+
+ if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
+ c.Ui.Output("Specified HA storage does not support HA")
+ return 1
+ }
+
+ if !coreConfig.HAPhysical.HAEnabled() {
+ c.Ui.Output("Specified HA storage has HA support disabled; please consult documentation")
+ return 1
+ }
+
+ coreConfig.RedirectAddr = config.HAStorage.RedirectAddr
+ disableClustering = config.HAStorage.DisableClustering
+ if !disableClustering {
+ coreConfig.ClusterAddr = config.HAStorage.ClusterAddr
+ }
+ } else {
+ if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
+ coreConfig.RedirectAddr = config.Storage.RedirectAddr
+ disableClustering = config.Storage.DisableClustering
+ if !disableClustering {
+ coreConfig.ClusterAddr = config.Storage.ClusterAddr
+ }
+ }
+ }
+
+ if envRA := os.Getenv("VAULT_REDIRECT_ADDR"); envRA != "" {
+ coreConfig.RedirectAddr = envRA
+ } else if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" {
+ coreConfig.RedirectAddr = envAA
+ }
+
+ // Attempt to detect the redirect address, if possible
+ var detect physical.RedirectDetect
+ if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
+ detect, ok = coreConfig.HAPhysical.(physical.RedirectDetect)
+ } else {
+ detect, ok = coreConfig.Physical.(physical.RedirectDetect)
+ }
+ if ok && coreConfig.RedirectAddr == "" {
+ redirect, err := c.detectRedirect(detect, config)
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf("Error detecting redirect address: %s", err))
+ } else if redirect == "" {
+ c.Ui.Output("Failed to detect redirect address.")
+ } else {
+ coreConfig.RedirectAddr = redirect
+ }
+ }
+ if coreConfig.RedirectAddr == "" && dev {
+ coreConfig.RedirectAddr = fmt.Sprintf("http://%s", config.Listeners[0].Config["address"])
+ }
+
+ // After the redirect bits are sorted out, if no cluster address was
+ // explicitly given, derive one from the redirect addr
+ if disableClustering {
+ coreConfig.ClusterAddr = ""
+ } else if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" {
+ coreConfig.ClusterAddr = envCA
+ } else {
+ var addrToUse string
+ switch {
+ case coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "":
+ addrToUse = coreConfig.RedirectAddr
+ case dev:
+ addrToUse = fmt.Sprintf("http://%s", config.Listeners[0].Config["address"])
+ default:
+ goto CLUSTER_SYNTHESIS_COMPLETE
+ }
+ u, err := url.ParseRequestURI(addrToUse)
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf("Error parsing synthesized cluster address %s: %v", addrToUse, err))
+ return 1
+ }
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ // This sucks, as it's a const in the function but not exported in the package
+ if strings.Contains(err.Error(), "missing port in address") {
+ host = u.Host
+ port = "443"
+ } else {
+ c.Ui.Output(fmt.Sprintf("Error parsing redirect address: %v", err))
+ return 1
+ }
+ }
+ nPort, err := strconv.Atoi(port)
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf("Error parsing synthesized address; failed to convert %q to a numeric: %v", port, err))
+ return 1
+ }
+ u.Host = net.JoinHostPort(host, strconv.Itoa(nPort+1))
+ // Will always be TLS-secured
+ u.Scheme = "https"
+ coreConfig.ClusterAddr = u.String()
+ }
+
+CLUSTER_SYNTHESIS_COMPLETE:
+
+ if coreConfig.ClusterAddr != "" {
+ // Force https as we'll always be TLS-secured
+ u, err := url.ParseRequestURI(coreConfig.ClusterAddr)
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf("Error parsing cluster address %s: %v", coreConfig.RedirectAddr, err))
+ return 1
+ }
+ u.Scheme = "https"
+ coreConfig.ClusterAddr = u.String()
+ }
+
+ // Initialize the core
+ core, newCoreError := vault.NewCore(coreConfig)
+ if newCoreError != nil {
+ if !errwrap.ContainsType(newCoreError, new(vault.NonFatalError)) {
+ c.Ui.Output(fmt.Sprintf("Error initializing core: %s", newCoreError))
+ return 1
+ }
+ }
+
+ // Copy the reload funcs pointers back
+ c.reloadFuncs = coreConfig.ReloadFuncs
+ c.reloadFuncsLock = coreConfig.ReloadFuncsLock
+
+ // Compile server information for output later
+ info["storage"] = config.Storage.Type
+ info["log level"] = logLevel
+ info["mlock"] = fmt.Sprintf(
+ "supported: %v, enabled: %v",
+ mlock.Supported(), !config.DisableMlock && mlock.Supported())
+ infoKeys = append(infoKeys, "log level", "mlock", "storage")
+
+ if coreConfig.ClusterAddr != "" {
+ info["cluster address"] = coreConfig.ClusterAddr
+ infoKeys = append(infoKeys, "cluster address")
+ }
+ if coreConfig.RedirectAddr != "" {
+ info["redirect address"] = coreConfig.RedirectAddr
+ infoKeys = append(infoKeys, "redirect address")
+ }
+
+ if config.HAStorage != nil {
+ info["HA storage"] = config.HAStorage.Type
+ infoKeys = append(infoKeys, "HA storage")
+ } else {
+ // If the storage supports HA, then note it
+ if coreConfig.HAPhysical != nil {
+ if coreConfig.HAPhysical.HAEnabled() {
+ info["storage"] += " (HA available)"
+ } else {
+ info["storage"] += " (HA disabled)"
+ }
+ }
+ }
+
+ clusterAddrs := []*net.TCPAddr{}
+
+ // Initialize the listeners
+ c.reloadFuncsLock.Lock()
+ lns := make([]net.Listener, 0, len(config.Listeners))
+ for i, lnConfig := range config.Listeners {
+ if lnConfig.Type == "atlas" {
+ if config.ClusterName == "" {
+ c.Ui.Output("cluster_name is not set in the config and is a required value")
+ return 1
+ }
+
+ lnConfig.Config["cluster_name"] = config.ClusterName
+ }
+
+ ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, logGate)
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf(
+ "Error initializing listener of type %s: %s",
+ lnConfig.Type, err))
+ return 1
+ }
+
+ lns = append(lns, ln)
+
+ if reloadFunc != nil {
+ relSlice := (*c.reloadFuncs)["listener|"+lnConfig.Type]
+ relSlice = append(relSlice, reloadFunc)
+ (*c.reloadFuncs)["listener|"+lnConfig.Type] = relSlice
+ }
+
+ if !disableClustering && lnConfig.Type == "tcp" {
+ var addr string
+ var ok bool
+ if addr, ok = lnConfig.Config["cluster_address"]; ok {
+ tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf(
+ "Error resolving cluster_address: %s",
+ err))
+ return 1
+ }
+ clusterAddrs = append(clusterAddrs, tcpAddr)
+ } else {
+ tcpAddr, ok := ln.Addr().(*net.TCPAddr)
+ if !ok {
+ c.Ui.Output("Failed to parse tcp listener")
+ return 1
+ }
+ clusterAddr := &net.TCPAddr{
+ IP: tcpAddr.IP,
+ Port: tcpAddr.Port + 1,
+ }
+ clusterAddrs = append(clusterAddrs, clusterAddr)
+ addr = clusterAddr.String()
+ }
+ props["cluster address"] = addr
+ }
+
+ // Store the listener props for output later
+ key := fmt.Sprintf("listener %d", i+1)
+ propsList := make([]string, 0, len(props))
+ for k, v := range props {
+ propsList = append(propsList, fmt.Sprintf(
+ "%s: %q", k, v))
+ }
+ sort.Strings(propsList)
+ infoKeys = append(infoKeys, key)
+ info[key] = fmt.Sprintf(
+ "%s (%s)", lnConfig.Type, strings.Join(propsList, ", "))
+
+ }
+ c.reloadFuncsLock.Unlock()
+ if !disableClustering {
+ if c.logger.IsTrace() {
+ c.logger.Trace("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs)
+ }
+ }
+
+ // Make sure we close all listeners from this point on
+ listenerCloseFunc := func() {
+ for _, ln := range lns {
+ ln.Close()
+ }
+ }
+
+ defer c.cleanupGuard.Do(listenerCloseFunc)
+
+ infoKeys = append(infoKeys, "version")
+ verInfo := version.GetVersion()
+ info["version"] = verInfo.FullVersionNumber(false)
+ if verInfo.Revision != "" {
+ info["version sha"] = strings.Trim(verInfo.Revision, "'")
+ infoKeys = append(infoKeys, "version sha")
+ }
+ infoKeys = append(infoKeys, "cgo")
+ info["cgo"] = "disabled"
+ if version.CgoEnabled {
+ info["cgo"] = "enabled"
+ }
+
+ // Server configuration output
+ padding := 24
+ sort.Strings(infoKeys)
+ c.Ui.Output("==> Vault server configuration:\n")
+ for _, k := range infoKeys {
+ c.Ui.Output(fmt.Sprintf(
+ "%s%s: %s",
+ strings.Repeat(" ", padding-len(k)),
+ strings.Title(k),
+ info[k]))
+ }
+ c.Ui.Output("")
+
+ if verifyOnly {
+ return 0
+ }
+
+ // Perform service discovery registrations and initialization of
+ // HTTP server after the verifyOnly check.
+
+ // Instantiate the wait group
+ c.WaitGroup = &sync.WaitGroup{}
+
+ // If the backend supports service discovery, run service discovery
+ if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
+ sd, ok := coreConfig.HAPhysical.(physical.ServiceDiscovery)
+ if ok {
+ activeFunc := func() bool {
+ if isLeader, _, err := core.Leader(); err == nil {
+ return isLeader
+ }
+ return false
+ }
+
+ sealedFunc := func() bool {
+ if sealed, err := core.Sealed(); err == nil {
+ return sealed
+ }
+ return true
+ }
+
+ if err := sd.RunServiceDiscovery(c.WaitGroup, c.ShutdownCh, coreConfig.RedirectAddr, activeFunc, sealedFunc); err != nil {
+ c.Ui.Output(fmt.Sprintf("Error initializing service discovery: %v", err))
+ return 1
+ }
+ }
+ }
+
+ handler := vaulthttp.Handler(core)
+
+ // This needs to happen before we first unseal, so before we trigger dev
+ // mode if it's set
+ core.SetClusterListenerAddrs(clusterAddrs)
+ core.SetClusterSetupFuncs(vault.WrapHandlerForClustering(handler, c.logger))
+
+ // If we're in Dev mode, then initialize the core
+ if dev {
+ init, err := c.enableDev(core, devRootTokenID)
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf(
+ "Error initializing Dev mode: %s", err))
+ return 1
+ }
+
+ export := "export"
+ quote := "'"
+ if runtime.GOOS == "windows" {
+ export = "set"
+ quote = ""
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "==> WARNING: Dev mode is enabled!\n\n"+
+ "In this mode, Vault is completely in-memory and unsealed.\n"+
+ "Vault is configured to only have a single unseal key. The root\n"+
+ "token has already been authenticated with the CLI, so you can\n"+
+ "immediately begin using the Vault CLI.\n\n"+
+ "The only step you need to take is to set the following\n"+
+ "environment variables:\n\n"+
+ " "+export+" VAULT_ADDR="+quote+"http://"+config.Listeners[0].Config["address"]+quote+"\n\n"+
+ "The unseal key and root token are reproduced below in case you\n"+
+ "want to seal/unseal the Vault or play with authentication.\n\n"+
+ "Unseal Key: %s\nRoot Token: %s\n",
+ base64.StdEncoding.EncodeToString(init.SecretShares[0]),
+ init.RootToken,
+ ))
+ }
+
+	// Initialize the HTTP server. Named srv so it does not shadow the
+	// imported "server" config package.
+	srv := &http.Server{}
+	if err := http2.ConfigureServer(srv, nil); err != nil {
+		c.Ui.Output(fmt.Sprintf("Error configuring server for HTTP/2: %s", err))
+		return 1
+	}
+	srv.Handler = handler
+	for _, ln := range lns {
+		go srv.Serve(ln)
+	}
+
+ if newCoreError != nil {
+ c.Ui.Output("==> Warning:\n\nNon-fatal error during initialization; check the logs for more information.")
+ c.Ui.Output("")
+ }
+
+ // Output the header that the server has started
+ c.Ui.Output("==> Vault server started! Log data will stream in below:\n")
+
+ // Release the log gate.
+ logGate.Flush()
+
+ // Wait for shutdown
+ shutdownTriggered := false
+
+ for !shutdownTriggered {
+ select {
+ case <-c.ShutdownCh:
+ c.Ui.Output("==> Vault shutdown triggered")
+
+			// Stop the listeners so that we don't process further client requests.
+ c.cleanupGuard.Do(listenerCloseFunc)
+
+ // Shutdown will wait until after Vault is sealed, which means the
+ // request forwarding listeners will also be closed (and also
+ // waited for).
+ if err := core.Shutdown(); err != nil {
+ c.Ui.Output(fmt.Sprintf("Error with core shutdown: %s", err))
+ }
+
+ shutdownTriggered = true
+
+ case <-c.SighupCh:
+ c.Ui.Output("==> Vault reload triggered")
+ if err := c.Reload(configPath); err != nil {
+ c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
+ }
+ }
+ }
+
+ // Wait for dependent goroutines to complete
+ c.WaitGroup.Wait()
+ return 0
+}
+
+func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.InitResult, error) {
+ // Initialize it with a basic single key
+ init, err := core.Initialize(&vault.InitParams{
+ BarrierConfig: &vault.SealConfig{
+ SecretShares: 1,
+ SecretThreshold: 1,
+ },
+ RecoveryConfig: nil,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Copy the key so that it can be zeroed
+ key := make([]byte, len(init.SecretShares[0]))
+ copy(key, init.SecretShares[0])
+
+ // Unseal the core
+ unsealed, err := core.Unseal(key)
+ if err != nil {
+ return nil, err
+ }
+ if !unsealed {
+ return nil, fmt.Errorf("failed to unseal Vault for dev mode")
+ }
+
+ isLeader, _, err := core.Leader()
+ if err != nil && err != vault.ErrHANotEnabled {
+ return nil, fmt.Errorf("failed to check active status: %v", err)
+ }
+ if err == nil {
+ leaderCount := 5
+ for !isLeader {
+ if leaderCount == 0 {
+ buf := make([]byte, 1<<16)
+ runtime.Stack(buf, true)
+ return nil, fmt.Errorf("failed to get active status after five seconds; call stack is\n%s\n", buf)
+ }
+ time.Sleep(1 * time.Second)
+ isLeader, _, err = core.Leader()
+ if err != nil {
+ return nil, fmt.Errorf("failed to check active status: %v", err)
+ }
+ leaderCount--
+ }
+ }
+
+ if rootTokenID != "" {
+ req := &logical.Request{
+ ID: "dev-gen-root",
+ Operation: logical.UpdateOperation,
+ ClientToken: init.RootToken,
+ Path: "auth/token/create",
+ Data: map[string]interface{}{
+ "id": rootTokenID,
+ "policies": []string{"root"},
+ "no_parent": true,
+ "no_default_policy": true,
+ },
+ }
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create root token with ID %s: %s", rootTokenID, err)
+ }
+ if resp == nil {
+ return nil, fmt.Errorf("nil response when creating root token with ID %s", rootTokenID)
+ }
+ if resp.Auth == nil {
+ return nil, fmt.Errorf("nil auth when creating root token with ID %s", rootTokenID)
+ }
+
+ init.RootToken = resp.Auth.ClientToken
+
+ req.ID = "dev-revoke-init-root"
+ req.Path = "auth/token/revoke-self"
+ req.Data = nil
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to revoke initial root token: %s", err)
+ }
+ }
+
+ // Set the token
+ tokenHelper, err := c.TokenHelper()
+ if err != nil {
+ return nil, err
+ }
+ if err := tokenHelper.Store(init.RootToken); err != nil {
+ return nil, err
+ }
+
+ return init, nil
+}
+
+// detectRedirect is used to attempt redirect address detection
+func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect,
+ config *server.Config) (string, error) {
+ // Get the hostname
+ host, err := detect.DetectHostAddr()
+ if err != nil {
+ return "", err
+ }
+
+ // set [] for ipv6 addresses
+ if strings.Contains(host, ":") && !strings.Contains(host, "]") {
+ host = "[" + host + "]"
+ }
+
+ // Default the port and scheme
+ scheme := "https"
+ port := 8200
+
+ // Attempt to detect overrides
+ for _, list := range config.Listeners {
+ // Only attempt TCP
+ if list.Type != "tcp" {
+ continue
+ }
+
+ // Check if TLS is disabled
+ if val, ok := list.Config["tls_disable"]; ok {
+ disable, err := strconv.ParseBool(val)
+ if err != nil {
+ return "", fmt.Errorf("tls_disable: %s", err)
+ }
+
+ if disable {
+ scheme = "http"
+ }
+ }
+
+ // Check for address override
+ addr, ok := list.Config["address"]
+ if !ok {
+ addr = "127.0.0.1:8200"
+ }
+
+ // Check for localhost
+ hostStr, portStr, err := net.SplitHostPort(addr)
+ if err != nil {
+ continue
+ }
+ if hostStr == "127.0.0.1" {
+ host = hostStr
+ }
+
+ // Check for custom port
+ listPort, err := strconv.Atoi(portStr)
+ if err != nil {
+ continue
+ }
+ port = listPort
+ }
+
+ // Build a URL
+ url := &url.URL{
+ Scheme: scheme,
+ Host: fmt.Sprintf("%s:%d", host, port),
+ }
+
+ // Return the URL string
+ return url.String(), nil
+}
+
+// setupTelemetry is used to setup the telemetry sub-systems
+func (c *ServerCommand) setupTelemetry(config *server.Config) error {
+ /* Setup telemetry
+ Aggregate on 10 second intervals for 1 minute. Expose the
+ metrics over stderr when there is a SIGUSR1 received.
+ */
+ inm := metrics.NewInmemSink(10*time.Second, time.Minute)
+ metrics.DefaultInmemSignal(inm)
+
+ var telConfig *server.Telemetry
+ if config.Telemetry == nil {
+ telConfig = &server.Telemetry{}
+ } else {
+ telConfig = config.Telemetry
+ }
+
+ metricsConf := metrics.DefaultConfig("vault")
+ metricsConf.EnableHostname = !telConfig.DisableHostname
+
+ // Configure the statsite sink
+ var fanout metrics.FanoutSink
+ if telConfig.StatsiteAddr != "" {
+ sink, err := metrics.NewStatsiteSink(telConfig.StatsiteAddr)
+ if err != nil {
+ return err
+ }
+ fanout = append(fanout, sink)
+ }
+
+ // Configure the statsd sink
+ if telConfig.StatsdAddr != "" {
+ sink, err := metrics.NewStatsdSink(telConfig.StatsdAddr)
+ if err != nil {
+ return err
+ }
+ fanout = append(fanout, sink)
+ }
+
+ // Configure the Circonus sink
+ if telConfig.CirconusAPIToken != "" || telConfig.CirconusCheckSubmissionURL != "" {
+ cfg := &circonus.Config{}
+ cfg.Interval = telConfig.CirconusSubmissionInterval
+ cfg.CheckManager.API.TokenKey = telConfig.CirconusAPIToken
+ cfg.CheckManager.API.TokenApp = telConfig.CirconusAPIApp
+ cfg.CheckManager.API.URL = telConfig.CirconusAPIURL
+ cfg.CheckManager.Check.SubmissionURL = telConfig.CirconusCheckSubmissionURL
+ cfg.CheckManager.Check.ID = telConfig.CirconusCheckID
+ cfg.CheckManager.Check.ForceMetricActivation = telConfig.CirconusCheckForceMetricActivation
+ cfg.CheckManager.Check.InstanceID = telConfig.CirconusCheckInstanceID
+ cfg.CheckManager.Check.SearchTag = telConfig.CirconusCheckSearchTag
+ cfg.CheckManager.Check.DisplayName = telConfig.CirconusCheckDisplayName
+ cfg.CheckManager.Check.Tags = telConfig.CirconusCheckTags
+ cfg.CheckManager.Broker.ID = telConfig.CirconusBrokerID
+ cfg.CheckManager.Broker.SelectTag = telConfig.CirconusBrokerSelectTag
+
+ if cfg.CheckManager.API.TokenApp == "" {
+ cfg.CheckManager.API.TokenApp = "vault"
+ }
+
+ if cfg.CheckManager.Check.DisplayName == "" {
+ cfg.CheckManager.Check.DisplayName = "Vault"
+ }
+
+ if cfg.CheckManager.Check.SearchTag == "" {
+ cfg.CheckManager.Check.SearchTag = "service:vault"
+ }
+
+ sink, err := circonus.NewCirconusSink(cfg)
+ if err != nil {
+ return err
+ }
+ sink.Start()
+ fanout = append(fanout, sink)
+ }
+
+ // Initialize the global sink
+ if len(fanout) > 0 {
+ fanout = append(fanout, inm)
+ metrics.NewGlobal(metricsConf, fanout)
+ } else {
+ metricsConf.EnableHostname = false
+ metrics.NewGlobal(metricsConf, inm)
+ }
+ return nil
+}
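
For reference, the core of the telemetry wiring above reduces to a few go-metrics calls. A standalone sketch with only the in-memory sink; the statsite, statsd, and Circonus branches follow the same append-to-fanout pattern:

package main

import (
	"time"

	"github.com/armon/go-metrics"
)

func main() {
	// Aggregate on 10-second intervals, retained for one minute, and dump
	// the aggregates to stderr when the process receives SIGUSR1.
	inm := metrics.NewInmemSink(10*time.Second, time.Minute)
	metrics.DefaultInmemSignal(inm)

	conf := metrics.DefaultConfig("vault")
	conf.EnableHostname = false
	if _, err := metrics.NewGlobal(conf, inm); err != nil {
		panic(err)
	}

	// Any subsequent metrics emission lands in the global sink.
	metrics.IncrCounter([]string{"example", "starts"}, 1)
}
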
+
+func (c *ServerCommand) Reload(configPath []string) error {
+ c.reloadFuncsLock.RLock()
+ defer c.reloadFuncsLock.RUnlock()
+
+ var reloadErrors *multierror.Error
+
+ // Read the new config
+ var config *server.Config
+ for _, path := range configPath {
+ current, err := server.LoadConfig(path, c.logger)
+ if err != nil {
+ reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error loading configuration from %s: %s", path, err))
+ goto audit
+ }
+
+ if config == nil {
+ config = current
+ } else {
+ config = config.Merge(current)
+ }
+ }
+
+ // Ensure at least one config was found.
+ if config == nil {
+ reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("No configuration files found"))
+ goto audit
+ }
+
+	// Call reload on the listeners. Every listener reload func is invoked
+	// with every config block; the funcs themselves verify that the address
+	// matches before acting on it.
+ for _, lnConfig := range config.Listeners {
+ for _, relFunc := range (*c.reloadFuncs)["listener|"+lnConfig.Type] {
+ if err := relFunc(lnConfig.Config); err != nil {
+ reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading configuration: %s", err))
+ goto audit
+ }
+ }
+ }
+
+audit:
+ // file audit reload funcs
+ for k, relFuncs := range *c.reloadFuncs {
+ if !strings.HasPrefix(k, "audit_file|") {
+ continue
+ }
+ for _, relFunc := range relFuncs {
+ if relFunc != nil {
+ if err := relFunc(nil); err != nil {
+ reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading file audit backend at path %s: %v", strings.TrimPrefix(k, "audit_file|"), err))
+ }
+ }
+ }
+ }
+
+ return reloadErrors.ErrorOrNil()
+}
+
+func (c *ServerCommand) Synopsis() string {
+ return "Start a Vault server"
+}
+
+func (c *ServerCommand) Help() string {
+ helpText := `
+Usage: vault server [options]
+
+ Start a Vault server.
+
+ This command starts a Vault server that responds to API requests.
+ Vault will start in a "sealed" state. The Vault must be unsealed
+ with "vault unseal" or the API before this server can respond to requests.
+ This must be done for every server.
+
+ If the server is being started against a storage backend that is
+ brand new (no existing Vault data in it), it must be initialized with
+ "vault init" or the API first.
+
+General Options:
+
+ -config= Path to the configuration file or directory. This can
+ be specified multiple times. If it is a directory,
+ all files with a ".hcl" or ".json" suffix will be
+ loaded.
+
+ -dev Enables Dev mode. In this mode, Vault is completely
+ in-memory and unsealed. Do not run the Dev server in
+ production!
+
+ -dev-root-token-id="" If set, the root token returned in Dev mode will have
+ the given ID. This *only* has an effect when running
+ in Dev mode. Can also be specified with the
+ VAULT_DEV_ROOT_TOKEN_ID environment variable.
+
+ -dev-listen-address="" If set, this overrides the normal Dev mode listen
+ address of "127.0.0.1:8200". Can also be specified
+ with the VAULT_DEV_LISTEN_ADDRESS environment
+ variable.
+
+  -log-level=info         Log verbosity. Defaults to "info"; logs are written
+                          to stderr. Supported values: "trace", "debug",
+                          "info", "notice", "warn", "err".
+`
+ return strings.TrimSpace(helpText)
+}
+
+// MakeShutdownCh returns a channel that can be used for shutdown
+// notifications for commands. This channel will send a message for every
+// SIGINT or SIGTERM received.
+func MakeShutdownCh() chan struct{} {
+ resultCh := make(chan struct{})
+
+ shutdownCh := make(chan os.Signal, 4)
+ signal.Notify(shutdownCh, os.Interrupt, syscall.SIGTERM)
+ go func() {
+ <-shutdownCh
+ close(resultCh)
+ }()
+ return resultCh
+}
+
+// MakeSighupCh returns a channel that can be used for SIGHUP
+// reloading. This channel will send a message for every
+// SIGHUP received.
+func MakeSighupCh() chan struct{} {
+ resultCh := make(chan struct{})
+
+ signalCh := make(chan os.Signal, 4)
+ signal.Notify(signalCh, syscall.SIGHUP)
+ go func() {
+ for {
+ <-signalCh
+ resultCh <- struct{}{}
+ }
+ }()
+ return resultCh
+}
+
+type grpclogFaker struct {
+ logger log.Logger
+}
+
+func (g *grpclogFaker) Fatal(args ...interface{}) {
+ g.logger.Error(fmt.Sprint(args...))
+ os.Exit(1)
+}
+
+func (g *grpclogFaker) Fatalf(format string, args ...interface{}) {
+ g.logger.Error(fmt.Sprintf(format, args...))
+ os.Exit(1)
+}
+
+func (g *grpclogFaker) Fatalln(args ...interface{}) {
+ g.logger.Error(fmt.Sprintln(args...))
+ os.Exit(1)
+}
+
+func (g *grpclogFaker) Print(args ...interface{}) {
+ g.logger.Warn(fmt.Sprint(args...))
+}
+
+func (g *grpclogFaker) Printf(format string, args ...interface{}) {
+ g.logger.Warn(fmt.Sprintf(format, args...))
+}
+
+func (g *grpclogFaker) Println(args ...interface{}) {
+ g.logger.Warn(fmt.Sprintln(args...))
+}
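
MakeShutdownCh and MakeSighupCh encapsulate a common signal-channel pattern; a standalone stdlib-only sketch of how the two channels combine in a run loop like ServerCommand.Run's:

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// One-shot shutdown channel: closed on the first SIGINT/SIGTERM.
	shutdownCh := make(chan struct{})
	intCh := make(chan os.Signal, 4)
	signal.Notify(intCh, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-intCh
		close(shutdownCh)
	}()

	// Repeating reload channel: one message per SIGHUP.
	sighupCh := make(chan struct{})
	hupCh := make(chan os.Signal, 4)
	signal.Notify(hupCh, syscall.SIGHUP)
	go func() {
		for range hupCh {
			sighupCh <- struct{}{}
		}
	}()

	for {
		select {
		case <-shutdownCh:
			log.Println("shutdown triggered")
			return
		case <-sighupCh:
			log.Println("reload triggered") // re-read config here
		}
	}
}
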
diff --git a/vendor/github.com/hashicorp/vault/command/server/config.go b/vendor/github.com/hashicorp/vault/command/server/config.go
new file mode 100644
index 0000000..e6ea123
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/config.go
@@ -0,0 +1,778 @@
+package server
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/vault/helper/parseutil"
+)
+
+// Config is the configuration for the vault server.
+type Config struct {
+ Listeners []*Listener `hcl:"-"`
+ Storage *Storage `hcl:"-"`
+ HAStorage *Storage `hcl:"-"`
+
+ HSM *HSM `hcl:"-"`
+
+ CacheSize int `hcl:"cache_size"`
+ DisableCache bool `hcl:"-"`
+ DisableCacheRaw interface{} `hcl:"disable_cache"`
+ DisableMlock bool `hcl:"-"`
+ DisableMlockRaw interface{} `hcl:"disable_mlock"`
+
+ EnableUI bool `hcl:"-"`
+ EnableUIRaw interface{} `hcl:"ui"`
+
+ Telemetry *Telemetry `hcl:"telemetry"`
+
+ MaxLeaseTTL time.Duration `hcl:"-"`
+ MaxLeaseTTLRaw interface{} `hcl:"max_lease_ttl"`
+ DefaultLeaseTTL time.Duration `hcl:"-"`
+ DefaultLeaseTTLRaw interface{} `hcl:"default_lease_ttl"`
+
+ ClusterName string `hcl:"cluster_name"`
+}
+
+// DevConfig is a Config that is used for dev mode of Vault.
+func DevConfig(ha, transactional bool) *Config {
+ ret := &Config{
+ DisableCache: false,
+ DisableMlock: true,
+
+ Storage: &Storage{
+ Type: "inmem",
+ },
+
+ Listeners: []*Listener{
+ &Listener{
+ Type: "tcp",
+ Config: map[string]string{
+ "address": "127.0.0.1:8200",
+ "tls_disable": "1",
+ },
+ },
+ },
+
+ EnableUI: true,
+
+ Telemetry: &Telemetry{},
+
+ MaxLeaseTTL: 32 * 24 * time.Hour,
+ DefaultLeaseTTL: 32 * 24 * time.Hour,
+ }
+
+ switch {
+ case ha && transactional:
+ ret.Storage.Type = "inmem_transactional_ha"
+ case !ha && transactional:
+ ret.Storage.Type = "inmem_transactional"
+ case ha && !transactional:
+ ret.Storage.Type = "inmem_ha"
+ }
+
+ return ret
+}
+
+// Listener is the listener configuration for the server.
+type Listener struct {
+ Type string
+ Config map[string]string
+}
+
+func (l *Listener) GoString() string {
+ return fmt.Sprintf("*%#v", *l)
+}
+
+// Storage is the underlying storage configuration for the server.
+type Storage struct {
+ Type string
+ RedirectAddr string
+ ClusterAddr string
+ DisableClustering bool
+ Config map[string]string
+}
+
+func (b *Storage) GoString() string {
+ return fmt.Sprintf("*%#v", *b)
+}
+
+// HSM contains HSM configuration for the server
+type HSM struct {
+ Type string
+ Config map[string]string
+}
+
+func (h *HSM) GoString() string {
+ return fmt.Sprintf("*%#v", *h)
+}
+
+// Telemetry is the telemetry configuration for the server
+type Telemetry struct {
+ StatsiteAddr string `hcl:"statsite_address"`
+ StatsdAddr string `hcl:"statsd_address"`
+
+ DisableHostname bool `hcl:"disable_hostname"`
+
+ // Circonus: see https://github.com/circonus-labs/circonus-gometrics
+ // for more details on the various configuration options.
+ // Valid configuration combinations:
+ // - CirconusAPIToken
+ // metric management enabled (search for existing check or create a new one)
+ // - CirconusSubmissionUrl
+ // metric management disabled (use check with specified submission_url,
+ // broker must be using a public SSL certificate)
+ // - CirconusAPIToken + CirconusCheckSubmissionURL
+ // metric management enabled (use check with specified submission_url)
+ // - CirconusAPIToken + CirconusCheckID
+ // metric management enabled (use check with specified id)
+
+ // CirconusAPIToken is a valid API Token used to create/manage check. If provided,
+ // metric management is enabled.
+ // Default: none
+ CirconusAPIToken string `hcl:"circonus_api_token"`
+ // CirconusAPIApp is an app name associated with API token.
+ // Default: "consul"
+ CirconusAPIApp string `hcl:"circonus_api_app"`
+ // CirconusAPIURL is the base URL to use for contacting the Circonus API.
+ // Default: "https://api.circonus.com/v2"
+ CirconusAPIURL string `hcl:"circonus_api_url"`
+ // CirconusSubmissionInterval is the interval at which metrics are submitted to Circonus.
+ // Default: 10s
+ CirconusSubmissionInterval string `hcl:"circonus_submission_interval"`
+ // CirconusCheckSubmissionURL is the check.config.submission_url field from a
+ // previously created HTTPTRAP check.
+ // Default: none
+ CirconusCheckSubmissionURL string `hcl:"circonus_submission_url"`
+ // CirconusCheckID is the check id (not check bundle id) from a previously created
+ // HTTPTRAP check. The numeric portion of the check._cid field.
+ // Default: none
+ CirconusCheckID string `hcl:"circonus_check_id"`
+ // CirconusCheckForceMetricActivation will force enabling metrics, as they are encountered,
+ // if the metric already exists and is NOT active. If check management is enabled, the default
+	// behavior is to add new metrics as they are encountered. If the metric already exists in the
+ // check, it will *NOT* be activated. This setting overrides that behavior.
+ // Default: "false"
+ CirconusCheckForceMetricActivation string `hcl:"circonus_check_force_metric_activation"`
+	// CirconusCheckInstanceID serves to uniquely identify the metrics coming from this "instance".
+ // It can be used to maintain metric continuity with transient or ephemeral instances as
+ // they move around within an infrastructure.
+ // Default: hostname:app
+ CirconusCheckInstanceID string `hcl:"circonus_check_instance_id"`
+ // CirconusCheckSearchTag is a special tag which, when coupled with the instance id, helps to
+	// narrow down the search results when neither a Submission URL nor a Check ID is provided.
+ // Default: service:app (e.g. service:consul)
+ CirconusCheckSearchTag string `hcl:"circonus_check_search_tag"`
+ // CirconusCheckTags is a comma separated list of tags to apply to the check. Note that
+ // the value of CirconusCheckSearchTag will always be added to the check.
+ // Default: none
+ CirconusCheckTags string `mapstructure:"circonus_check_tags"`
+ // CirconusCheckDisplayName is the name for the check which will be displayed in the Circonus UI.
+ // Default: value of CirconusCheckInstanceID
+ CirconusCheckDisplayName string `mapstructure:"circonus_check_display_name"`
+ // CirconusBrokerID is an explicit broker to use when creating a new check. The numeric portion
+ // of broker._cid. If metric management is enabled and neither a Submission URL nor Check ID
+ // is provided, an attempt will be made to search for an existing check using Instance ID and
+ // Search Tag. If one is not found, a new HTTPTRAP check will be created.
+ // Default: use Select Tag if provided, otherwise, a random Enterprise Broker associated
+ // with the specified API token or the default Circonus Broker.
+ // Default: none
+ CirconusBrokerID string `hcl:"circonus_broker_id"`
+ // CirconusBrokerSelectTag is a special tag which will be used to select a broker when
+ // a Broker ID is not provided. It is best used as a hint for which broker should be
+ // selected based on *where* this particular instance is running.
+ // (e.g. a specific geo location or datacenter, dc:sfo)
+ // Default: none
+ CirconusBrokerSelectTag string `hcl:"circonus_broker_select_tag"`
+}
+
+func (s *Telemetry) GoString() string {
+ return fmt.Sprintf("*%#v", *s)
+}
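+
+// For illustration, a telemetry stanza that enables Circonus metric
+// management; every key here is one accepted by parseTelemetry below, and
+// the values are placeholders:
+//
+//   telemetry {
+//     statsite_address           = "127.0.0.1:8125"
+//     disable_hostname           = true
+//     circonus_api_token         = "<token>"
+//     circonus_check_instance_id = "node1:vault"
+//     circonus_check_search_tag  = "service:vault"
+//   }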
+
+// Merge merges two configurations.
+func (c *Config) Merge(c2 *Config) *Config {
+ if c2 == nil {
+ return c
+ }
+
+ result := new(Config)
+ for _, l := range c.Listeners {
+ result.Listeners = append(result.Listeners, l)
+ }
+ for _, l := range c2.Listeners {
+ result.Listeners = append(result.Listeners, l)
+ }
+
+ result.Storage = c.Storage
+ if c2.Storage != nil {
+ result.Storage = c2.Storage
+ }
+
+ result.HAStorage = c.HAStorage
+ if c2.HAStorage != nil {
+ result.HAStorage = c2.HAStorage
+ }
+
+ result.HSM = c.HSM
+ if c2.HSM != nil {
+ result.HSM = c2.HSM
+ }
+
+ result.Telemetry = c.Telemetry
+ if c2.Telemetry != nil {
+ result.Telemetry = c2.Telemetry
+ }
+
+ result.CacheSize = c.CacheSize
+ if c2.CacheSize != 0 {
+ result.CacheSize = c2.CacheSize
+ }
+
+ // merge these booleans via an OR operation
+ result.DisableCache = c.DisableCache
+ if c2.DisableCache {
+ result.DisableCache = c2.DisableCache
+ }
+
+ result.DisableMlock = c.DisableMlock
+ if c2.DisableMlock {
+ result.DisableMlock = c2.DisableMlock
+ }
+
+ // merge these integers via a MAX operation
+ result.MaxLeaseTTL = c.MaxLeaseTTL
+ if c2.MaxLeaseTTL > result.MaxLeaseTTL {
+ result.MaxLeaseTTL = c2.MaxLeaseTTL
+ }
+
+ result.DefaultLeaseTTL = c.DefaultLeaseTTL
+ if c2.DefaultLeaseTTL > result.DefaultLeaseTTL {
+ result.DefaultLeaseTTL = c2.DefaultLeaseTTL
+ }
+
+ result.ClusterName = c.ClusterName
+ if c2.ClusterName != "" {
+ result.ClusterName = c2.ClusterName
+ }
+
+ result.EnableUI = c.EnableUI
+ if c2.EnableUI {
+ result.EnableUI = c2.EnableUI
+ }
+
+ return result
+}
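+
+// A minimal sketch of the merge semantics above (hypothetical values):
+// booleans are OR'd, the lease TTLs take the maximum, and scalar fields
+// from the second config win when set.
+//
+//   a := &Config{DisableCache: true, MaxLeaseTTL: 10 * time.Hour}
+//   b := &Config{MaxLeaseTTL: 20 * time.Hour, ClusterName: "prod"}
+//   m := a.Merge(b)
+//   // m.DisableCache == true, m.MaxLeaseTTL == 20h, m.ClusterName == "prod"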
+
+// LoadConfig loads the configuration at the given path, regardless of
+// whether it is a file or a directory.
+func LoadConfig(path string, logger log.Logger) (*Config, error) {
+ fi, err := os.Stat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ if fi.IsDir() {
+ return LoadConfigDir(path, logger)
+ } else {
+ return LoadConfigFile(path, logger)
+ }
+}
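+
+// For example (paths are illustrative), LoadConfig("/etc/vault.d", logger)
+// merges every *.hcl and *.json file in that directory, while
+// LoadConfig("/etc/vault.hcl", logger) parses a single file.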
+
+// LoadConfigFile loads the configuration from the given file.
+func LoadConfigFile(path string, logger log.Logger) (*Config, error) {
+ // Read the file
+ d, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ return ParseConfig(string(d), logger)
+}
+
+func ParseConfig(d string, logger log.Logger) (*Config, error) {
+ // Parse!
+ obj, err := hcl.Parse(d)
+ if err != nil {
+ return nil, err
+ }
+
+ // Start building the result
+ var result Config
+ if err := hcl.DecodeObject(&result, obj); err != nil {
+ return nil, err
+ }
+
+ if result.MaxLeaseTTLRaw != nil {
+ if result.MaxLeaseTTL, err = parseutil.ParseDurationSecond(result.MaxLeaseTTLRaw); err != nil {
+ return nil, err
+ }
+ }
+ if result.DefaultLeaseTTLRaw != nil {
+ if result.DefaultLeaseTTL, err = parseutil.ParseDurationSecond(result.DefaultLeaseTTLRaw); err != nil {
+ return nil, err
+ }
+ }
+
+ if result.EnableUIRaw != nil {
+ if result.EnableUI, err = parseutil.ParseBool(result.EnableUIRaw); err != nil {
+ return nil, err
+ }
+ }
+
+ if result.DisableCacheRaw != nil {
+ if result.DisableCache, err = parseutil.ParseBool(result.DisableCacheRaw); err != nil {
+ return nil, err
+ }
+ }
+
+ if result.DisableMlockRaw != nil {
+ if result.DisableMlock, err = parseutil.ParseBool(result.DisableMlockRaw); err != nil {
+ return nil, err
+ }
+ }
+
+ list, ok := obj.Node.(*ast.ObjectList)
+ if !ok {
+ return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
+ }
+
+ valid := []string{
+ "atlas",
+ "storage",
+ "ha_storage",
+ "backend",
+ "ha_backend",
+ "hsm",
+ "listener",
+ "cache_size",
+ "disable_cache",
+ "disable_mlock",
+ "ui",
+ "telemetry",
+ "default_lease_ttl",
+ "max_lease_ttl",
+ "cluster_name",
+ }
+ if err := checkHCLKeys(list, valid); err != nil {
+ return nil, err
+ }
+
+ // Look for storage but still support old backend
+ if o := list.Filter("storage"); len(o.Items) > 0 {
+ if err := parseStorage(&result, o, "storage"); err != nil {
+ return nil, fmt.Errorf("error parsing 'storage': %s", err)
+ }
+ } else {
+ if o := list.Filter("backend"); len(o.Items) > 0 {
+ if err := parseStorage(&result, o, "backend"); err != nil {
+ return nil, fmt.Errorf("error parsing 'backend': %s", err)
+ }
+ }
+ }
+
+ if o := list.Filter("ha_storage"); len(o.Items) > 0 {
+ if err := parseHAStorage(&result, o, "ha_storage"); err != nil {
+ return nil, fmt.Errorf("error parsing 'ha_storage': %s", err)
+ }
+ } else {
+ if o := list.Filter("ha_backend"); len(o.Items) > 0 {
+ if err := parseHAStorage(&result, o, "ha_backend"); err != nil {
+ return nil, fmt.Errorf("error parsing 'ha_backend': %s", err)
+ }
+ }
+ }
+
+ if o := list.Filter("hsm"); len(o.Items) > 0 {
+ if err := parseHSMs(&result, o); err != nil {
+ return nil, fmt.Errorf("error parsing 'hsm': %s", err)
+ }
+ }
+
+ if o := list.Filter("listener"); len(o.Items) > 0 {
+ if err := parseListeners(&result, o); err != nil {
+ return nil, fmt.Errorf("error parsing 'listener': %s", err)
+ }
+ }
+
+ if o := list.Filter("telemetry"); len(o.Items) > 0 {
+ if err := parseTelemetry(&result, o); err != nil {
+ return nil, fmt.Errorf("error parsing 'telemetry': %s", err)
+ }
+ }
+
+ return &result, nil
+}
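+
+// A minimal configuration accepted by ParseConfig, for illustration
+// (addresses are placeholders):
+//
+//   storage "consul" {
+//     address = "127.0.0.1:8500"
+//   }
+//
+//   listener "tcp" {
+//     address     = "127.0.0.1:8200"
+//     tls_disable = "true"
+//   }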
+
+// LoadConfigDir loads and merges all *.hcl and *.json configuration
+// files in the given directory.
+func LoadConfigDir(dir string, logger log.Logger) (*Config, error) {
+ f, err := os.Open(dir)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ if !fi.IsDir() {
+ return nil, fmt.Errorf(
+ "configuration path must be a directory: %s",
+ dir)
+ }
+
+ var files []string
+ err = nil
+ for err != io.EOF {
+ var fis []os.FileInfo
+ fis, err = f.Readdir(128)
+ if err != nil && err != io.EOF {
+ return nil, err
+ }
+
+ for _, fi := range fis {
+ // Ignore directories
+ if fi.IsDir() {
+ continue
+ }
+
+ // Only care about files that are valid to load.
+ name := fi.Name()
+ skip := true
+ if strings.HasSuffix(name, ".hcl") {
+ skip = false
+ } else if strings.HasSuffix(name, ".json") {
+ skip = false
+ }
+ if skip || isTemporaryFile(name) {
+ continue
+ }
+
+ path := filepath.Join(dir, name)
+ files = append(files, path)
+ }
+ }
+
+ var result *Config
+ for _, f := range files {
+ config, err := LoadConfigFile(f, logger)
+ if err != nil {
+ return nil, fmt.Errorf("Error loading %s: %s", f, err)
+ }
+
+ if result == nil {
+ result = config
+ } else {
+ result = result.Merge(config)
+ }
+ }
+
+ return result, nil
+}
+
+// isTemporaryFile reports whether the provided file name looks like a
+// temporary file created by one of the following editors: emacs or vim.
+func isTemporaryFile(name string) bool {
+ return strings.HasSuffix(name, "~") || // vim
+ strings.HasPrefix(name, ".#") || // emacs
+ (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs
+}
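+
+// For example, "config.hcl~" (vim), ".#config.hcl" and "#config.hcl#"
+// (emacs) are all treated as temporary and skipped by LoadConfigDir.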
+
+func parseStorage(result *Config, list *ast.ObjectList, name string) error {
+ if len(list.Items) > 1 {
+ return fmt.Errorf("only one %q block is permitted", name)
+ }
+
+ // Get our item
+ item := list.Items[0]
+
+ key := name
+ if len(item.Keys) > 0 {
+ key = item.Keys[0].Token.Value().(string)
+ }
+
+ var m map[string]string
+ if err := hcl.DecodeObject(&m, item.Val); err != nil {
+ return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
+ }
+
+ // Pull out the redirect address since it's common to all backends
+ var redirectAddr string
+ if v, ok := m["redirect_addr"]; ok {
+ redirectAddr = v
+ delete(m, "redirect_addr")
+ } else if v, ok := m["advertise_addr"]; ok {
+ redirectAddr = v
+ delete(m, "advertise_addr")
+ }
+
+ // Pull out the cluster address since it's common to all backends
+ var clusterAddr string
+ if v, ok := m["cluster_addr"]; ok {
+ clusterAddr = v
+ delete(m, "cluster_addr")
+ }
+
+ var disableClustering bool
+ var err error
+ if v, ok := m["disable_clustering"]; ok {
+ disableClustering, err = strconv.ParseBool(v)
+ if err != nil {
+ return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
+ }
+ delete(m, "disable_clustering")
+ }
+
+ result.Storage = &Storage{
+ RedirectAddr: redirectAddr,
+ ClusterAddr: clusterAddr,
+ DisableClustering: disableClustering,
+ Type: strings.ToLower(key),
+ Config: m,
+ }
+ return nil
+}
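+
+// A sketch of a storage stanza exercising the common keys pulled out
+// above; the addresses are placeholders and "foo" stands in for any
+// backend-specific option left behind in Config:
+//
+//   storage "consul" {
+//     redirect_addr      = "https://vault.example.com:8200"
+//     cluster_addr       = "https://vault.example.com:8201"
+//     disable_clustering = "false"
+//     foo                = "bar"
+//   }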
+
+func parseHAStorage(result *Config, list *ast.ObjectList, name string) error {
+ if len(list.Items) > 1 {
+ return fmt.Errorf("only one %q block is permitted", name)
+ }
+
+ // Get our item
+ item := list.Items[0]
+
+ key := name
+ if len(item.Keys) > 0 {
+ key = item.Keys[0].Token.Value().(string)
+ }
+
+ var m map[string]string
+ if err := hcl.DecodeObject(&m, item.Val); err != nil {
+ return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
+ }
+
+ // Pull out the redirect address since it's common to all backends
+ var redirectAddr string
+ if v, ok := m["redirect_addr"]; ok {
+ redirectAddr = v
+ delete(m, "redirect_addr")
+ } else if v, ok := m["advertise_addr"]; ok {
+ redirectAddr = v
+ delete(m, "advertise_addr")
+ }
+
+ // Pull out the cluster address since it's common to all backends
+ var clusterAddr string
+ if v, ok := m["cluster_addr"]; ok {
+ clusterAddr = v
+ delete(m, "cluster_addr")
+ }
+
+ var disableClustering bool
+ var err error
+ if v, ok := m["disable_clustering"]; ok {
+ disableClustering, err = strconv.ParseBool(v)
+ if err != nil {
+ return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
+ }
+ delete(m, "disable_clustering")
+ }
+
+ result.HAStorage = &Storage{
+ RedirectAddr: redirectAddr,
+ ClusterAddr: clusterAddr,
+ DisableClustering: disableClustering,
+ Type: strings.ToLower(key),
+ Config: m,
+ }
+ return nil
+}
+
+func parseHSMs(result *Config, list *ast.ObjectList) error {
+ if len(list.Items) > 1 {
+ return fmt.Errorf("only one 'hsm' block is permitted")
+ }
+
+ // Get our item
+ item := list.Items[0]
+
+ key := "hsm"
+ if len(item.Keys) > 0 {
+ key = item.Keys[0].Token.Value().(string)
+ }
+
+ valid := []string{
+ "lib",
+ "slot",
+ "pin",
+ "mechanism",
+ "key_label",
+ "generate_key",
+ "regenerate_key",
+ }
+ if err := checkHCLKeys(item.Val, valid); err != nil {
+ return multierror.Prefix(err, fmt.Sprintf("hsm.%s:", key))
+ }
+
+ var m map[string]string
+ if err := hcl.DecodeObject(&m, item.Val); err != nil {
+ return multierror.Prefix(err, fmt.Sprintf("hsm.%s:", key))
+ }
+
+ result.HSM = &HSM{
+ Type: strings.ToLower(key),
+ Config: m,
+ }
+
+ return nil
+}
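+
+// A sketch of an hsm stanza using only the keys validated above; the block
+// label becomes the Type, and "pkcs11" as well as the library path, slot
+// and pin are illustrative:
+//
+//   hsm "pkcs11" {
+//     lib       = "/usr/lib/softhsm/libsofthsm2.so"
+//     slot      = "0"
+//     pin       = "1234"
+//     key_label = "vault-hsm-key"
+//   }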
+
+func parseListeners(result *Config, list *ast.ObjectList) error {
+ var foundAtlas bool
+
+ listeners := make([]*Listener, 0, len(list.Items))
+ for _, item := range list.Items {
+ key := "listener"
+ if len(item.Keys) > 0 {
+ key = item.Keys[0].Token.Value().(string)
+ }
+
+ valid := []string{
+ "address",
+ "cluster_address",
+ "endpoint",
+ "infrastructure",
+ "node_id",
+ "tls_disable",
+ "tls_cert_file",
+ "tls_key_file",
+ "tls_min_version",
+ "tls_cipher_suites",
+ "tls_prefer_server_cipher_suites",
+ "tls_require_and_verify_client_cert",
+ "token",
+ }
+ if err := checkHCLKeys(item.Val, valid); err != nil {
+ return multierror.Prefix(err, fmt.Sprintf("listeners.%s:", key))
+ }
+
+ var m map[string]string
+ if err := hcl.DecodeObject(&m, item.Val); err != nil {
+ return multierror.Prefix(err, fmt.Sprintf("listeners.%s:", key))
+ }
+
+ lnType := strings.ToLower(key)
+
+ if lnType == "atlas" {
+ if foundAtlas {
+ return multierror.Prefix(fmt.Errorf("only one listener of type 'atlas' is permitted"), fmt.Sprintf("listeners.%s", key))
+ }
+
+ foundAtlas = true
+ if m["token"] == "" {
+ return multierror.Prefix(fmt.Errorf("'token' must be specified for an Atlas listener"), fmt.Sprintf("listeners.%s", key))
+ }
+ if m["infrastructure"] == "" {
+ return multierror.Prefix(fmt.Errorf("'infrastructure' must be specified for an Atlas listener"), fmt.Sprintf("listeners.%s", key))
+ }
+ if m["node_id"] == "" {
+ return multierror.Prefix(fmt.Errorf("'node_id' must be specified for an Atlas listener"), fmt.Sprintf("listeners.%s", key))
+ }
+ }
+
+ listeners = append(listeners, &Listener{
+ Type: lnType,
+ Config: m,
+ })
+ }
+
+ result.Listeners = listeners
+ return nil
+}
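+
+// For illustration, a tcp listener plus an atlas listener carrying the
+// three keys required above (values are placeholders):
+//
+//   listener "tcp" {
+//     address = "127.0.0.1:8200"
+//   }
+//
+//   listener "atlas" {
+//     token          = "<atlas token>"
+//     infrastructure = "org/env"
+//     node_id        = "node-1"
+//   }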
+
+func parseTelemetry(result *Config, list *ast.ObjectList) error {
+ if len(list.Items) > 1 {
+ return fmt.Errorf("only one 'telemetry' block is permitted")
+ }
+
+ // Get our one item
+ item := list.Items[0]
+
+ // Check for invalid keys
+ valid := []string{
+ "circonus_api_token",
+ "circonus_api_app",
+ "circonus_api_url",
+ "circonus_submission_interval",
+ "circonus_submission_url",
+ "circonus_check_id",
+ "circonus_check_force_metric_activation",
+ "circonus_check_instance_id",
+ "circonus_check_search_tag",
+ "circonus_check_display_name",
+ "circonus_check_tags",
+ "circonus_broker_id",
+ "circonus_broker_select_tag",
+ "disable_hostname",
+ "statsd_address",
+ "statsite_address",
+ }
+ if err := checkHCLKeys(item.Val, valid); err != nil {
+ return multierror.Prefix(err, "telemetry:")
+ }
+
+ var t Telemetry
+ if err := hcl.DecodeObject(&t, item.Val); err != nil {
+ return multierror.Prefix(err, "telemetry:")
+ }
+
+ if result.Telemetry == nil {
+ result.Telemetry = &Telemetry{}
+ }
+
+ if err := hcl.DecodeObject(&result.Telemetry, item.Val); err != nil {
+ return multierror.Prefix(err, "telemetry:")
+ }
+ return nil
+}
+
+func checkHCLKeys(node ast.Node, valid []string) error {
+ var list *ast.ObjectList
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ list = n
+ case *ast.ObjectType:
+ list = n.List
+ default:
+ return fmt.Errorf("cannot check HCL keys of type %T", n)
+ }
+
+ validMap := make(map[string]struct{}, len(valid))
+ for _, v := range valid {
+ validMap[v] = struct{}{}
+ }
+
+ var result error
+ for _, item := range list.Items {
+ key := item.Keys[0].Token.Value().(string)
+ if _, ok := validMap[key]; !ok {
+ result = multierror.Append(result, fmt.Errorf(
+ "invalid key '%s' on line %d", key, item.Assign.Line))
+ }
+ }
+
+ return result
+}
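+
+// For example, a config whose second line is `bad = "one"` yields a
+// multierror containing "invalid key 'bad' on line 2" (see the tests in
+// config_test.go).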
diff --git a/vendor/github.com/hashicorp/vault/command/server/config_test.go b/vendor/github.com/hashicorp/vault/command/server/config_test.go
new file mode 100644
index 0000000..789be40
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/config_test.go
@@ -0,0 +1,330 @@
+package server
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func TestLoadConfigFile(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ config, err := LoadConfigFile("./test-fixtures/config.hcl", logger)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := &Config{
+ Listeners: []*Listener{
+ &Listener{
+ Type: "atlas",
+ Config: map[string]string{
+ "token": "foobar",
+ "infrastructure": "foo/bar",
+ "endpoint": "https://foo.bar:1111",
+ "node_id": "foo_node",
+ },
+ },
+ &Listener{
+ Type: "tcp",
+ Config: map[string]string{
+ "address": "127.0.0.1:443",
+ },
+ },
+ },
+
+ Storage: &Storage{
+ Type: "consul",
+ RedirectAddr: "foo",
+ Config: map[string]string{
+ "foo": "bar",
+ },
+ },
+
+ HAStorage: &Storage{
+ Type: "consul",
+ RedirectAddr: "snafu",
+ Config: map[string]string{
+ "bar": "baz",
+ },
+ DisableClustering: true,
+ },
+
+ Telemetry: &Telemetry{
+ StatsdAddr: "bar",
+ StatsiteAddr: "foo",
+ DisableHostname: false,
+ },
+
+ DisableCache: true,
+ DisableCacheRaw: true,
+ DisableMlock: true,
+ DisableMlockRaw: true,
+ EnableUI: true,
+ EnableUIRaw: true,
+
+ MaxLeaseTTL: 10 * time.Hour,
+ MaxLeaseTTLRaw: "10h",
+ DefaultLeaseTTL: 10 * time.Hour,
+ DefaultLeaseTTLRaw: "10h",
+ ClusterName: "testcluster",
+ }
+ if !reflect.DeepEqual(config, expected) {
+ t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
+ }
+}
+
+func TestLoadConfigFile_json(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ config, err := LoadConfigFile("./test-fixtures/config.hcl.json", logger)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := &Config{
+ Listeners: []*Listener{
+ &Listener{
+ Type: "tcp",
+ Config: map[string]string{
+ "address": "127.0.0.1:443",
+ },
+ },
+ &Listener{
+ Type: "atlas",
+ Config: map[string]string{
+ "token": "foobar",
+ "infrastructure": "foo/bar",
+ "endpoint": "https://foo.bar:1111",
+ "node_id": "foo_node",
+ },
+ },
+ },
+
+ Storage: &Storage{
+ Type: "consul",
+ Config: map[string]string{
+ "foo": "bar",
+ },
+ DisableClustering: true,
+ },
+
+ Telemetry: &Telemetry{
+ StatsiteAddr: "baz",
+ StatsdAddr: "",
+ DisableHostname: false,
+ CirconusAPIToken: "",
+ CirconusAPIApp: "",
+ CirconusAPIURL: "",
+ CirconusSubmissionInterval: "",
+ CirconusCheckSubmissionURL: "",
+ CirconusCheckID: "",
+ CirconusCheckForceMetricActivation: "",
+ CirconusCheckInstanceID: "",
+ CirconusCheckSearchTag: "",
+ CirconusCheckDisplayName: "",
+ CirconusCheckTags: "",
+ CirconusBrokerID: "",
+ CirconusBrokerSelectTag: "",
+ },
+
+ MaxLeaseTTL: 10 * time.Hour,
+ MaxLeaseTTLRaw: "10h",
+ DefaultLeaseTTL: 10 * time.Hour,
+ DefaultLeaseTTLRaw: "10h",
+ ClusterName: "testcluster",
+ DisableCacheRaw: interface{}(nil),
+ DisableMlockRaw: interface{}(nil),
+ EnableUI: true,
+ EnableUIRaw: true,
+ }
+ if !reflect.DeepEqual(config, expected) {
+ t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
+ }
+}
+
+func TestLoadConfigFile_json2(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ config, err := LoadConfigFile("./test-fixtures/config2.hcl.json", logger)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := &Config{
+ Listeners: []*Listener{
+ &Listener{
+ Type: "tcp",
+ Config: map[string]string{
+ "address": "127.0.0.1:443",
+ },
+ },
+ &Listener{
+ Type: "tcp",
+ Config: map[string]string{
+ "address": "127.0.0.1:444",
+ },
+ },
+ },
+
+ Storage: &Storage{
+ Type: "consul",
+ Config: map[string]string{
+ "foo": "bar",
+ },
+ },
+
+ HAStorage: &Storage{
+ Type: "consul",
+ Config: map[string]string{
+ "bar": "baz",
+ },
+ DisableClustering: true,
+ },
+
+ CacheSize: 45678,
+
+ EnableUI: true,
+ EnableUIRaw: true,
+
+ Telemetry: &Telemetry{
+ StatsiteAddr: "foo",
+ StatsdAddr: "bar",
+ DisableHostname: true,
+ CirconusAPIToken: "0",
+ CirconusAPIApp: "vault",
+ CirconusAPIURL: "http://api.circonus.com/v2",
+ CirconusSubmissionInterval: "10s",
+ CirconusCheckSubmissionURL: "https://someplace.com/metrics",
+ CirconusCheckID: "0",
+ CirconusCheckForceMetricActivation: "true",
+ CirconusCheckInstanceID: "node1:vault",
+ CirconusCheckSearchTag: "service:vault",
+ CirconusCheckDisplayName: "node1:vault",
+ CirconusCheckTags: "cat1:tag1,cat2:tag2",
+ CirconusBrokerID: "0",
+ CirconusBrokerSelectTag: "dc:sfo",
+ },
+ }
+ if !reflect.DeepEqual(config, expected) {
+ t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
+ }
+}
+
+func TestLoadConfigDir(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ config, err := LoadConfigDir("./test-fixtures/config-dir", logger)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := &Config{
+ DisableCache: true,
+ DisableMlock: true,
+
+ Listeners: []*Listener{
+ &Listener{
+ Type: "tcp",
+ Config: map[string]string{
+ "address": "127.0.0.1:443",
+ },
+ },
+ },
+
+ Storage: &Storage{
+ Type: "consul",
+ Config: map[string]string{
+ "foo": "bar",
+ },
+ DisableClustering: true,
+ },
+
+ EnableUI: true,
+
+ Telemetry: &Telemetry{
+ StatsiteAddr: "qux",
+ StatsdAddr: "baz",
+ DisableHostname: true,
+ },
+
+ MaxLeaseTTL: 10 * time.Hour,
+ DefaultLeaseTTL: 10 * time.Hour,
+ ClusterName: "testcluster",
+ }
+ if !reflect.DeepEqual(config, expected) {
+ t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
+ }
+}
+
+func TestParseConfig_badTopLevel(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ _, err := ParseConfig(strings.TrimSpace(`
+backend {}
+bad = "one"
+nope = "yes"
+`), logger)
+
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ if !strings.Contains(err.Error(), "invalid key 'bad' on line 2") {
+ t.Errorf("bad error: %q", err)
+ }
+
+ if !strings.Contains(err.Error(), "invalid key 'nope' on line 3") {
+ t.Errorf("bad error: %q", err)
+ }
+}
+
+func TestParseConfig_badListener(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ _, err := ParseConfig(strings.TrimSpace(`
+listener "tcp" {
+ address = "1.2.3.3"
+ bad = "one"
+ nope = "yes"
+}
+`), logger)
+
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ if !strings.Contains(err.Error(), "listeners.tcp: invalid key 'bad' on line 3") {
+ t.Errorf("bad error: %q", err)
+ }
+
+ if !strings.Contains(err.Error(), "listeners.tcp: invalid key 'nope' on line 4") {
+ t.Errorf("bad error: %q", err)
+ }
+}
+
+func TestParseConfig_badTelemetry(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ _, err := ParseConfig(strings.TrimSpace(`
+telemetry {
+ statsd_address = "1.2.3.3"
+ bad = "one"
+ nope = "yes"
+}
+`), logger)
+
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ if !strings.Contains(err.Error(), "telemetry: invalid key 'bad' on line 3") {
+ t.Errorf("bad error: %q", err)
+ }
+
+ if !strings.Contains(err.Error(), "telemetry: invalid key 'nope' on line 4") {
+ t.Errorf("bad error: %q", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener.go b/vendor/github.com/hashicorp/vault/command/server/listener.go
new file mode 100644
index 0000000..999966e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/listener.go
@@ -0,0 +1,150 @@
+package server
+
+import (
+ // We must import sha512 so that it registers with the runtime,
+ // allowing certificates that use it to be parsed.
+ _ "crypto/sha512"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "sync"
+
+ "github.com/hashicorp/vault/helper/tlsutil"
+ "github.com/hashicorp/vault/vault"
+)
+
+// ListenerFactory is the factory function to create a listener.
+type ListenerFactory func(map[string]string, io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error)
+
+// BuiltinListeners is the list of built-in listener types.
+var BuiltinListeners = map[string]ListenerFactory{
+ "tcp": tcpListenerFactory,
+ "atlas": atlasListenerFactory,
+}
+
+// NewListener creates a new listener of the given type with the given
+// configuration. The type is looked up in the BuiltinListeners map.
+func NewListener(t string, config map[string]string, logger io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error) {
+ f, ok := BuiltinListeners[t]
+ if !ok {
+ return nil, nil, nil, fmt.Errorf("unknown listener type: %s", t)
+ }
+
+ return f(config, logger)
+}
+
+func listenerWrapTLS(
+ ln net.Listener,
+ props map[string]string,
+ config map[string]string) (net.Listener, map[string]string, vault.ReloadFunc, error) {
+ props["tls"] = "disabled"
+
+ if v, ok := config["tls_disable"]; ok {
+ disabled, err := strconv.ParseBool(v)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("invalid value for 'tls_disable': %v", err)
+ }
+ if disabled {
+ return ln, props, nil, nil
+ }
+ }
+
+ _, ok := config["tls_cert_file"]
+ if !ok {
+ return nil, nil, nil, fmt.Errorf("'tls_cert_file' must be set")
+ }
+
+ _, ok = config["tls_key_file"]
+ if !ok {
+ return nil, nil, nil, fmt.Errorf("'tls_key_file' must be set")
+ }
+
+ cg := &certificateGetter{
+ id: config["address"],
+ }
+
+ if err := cg.reload(config); err != nil {
+ return nil, nil, nil, fmt.Errorf("error loading TLS cert: %s", err)
+ }
+
+ tlsvers, ok := config["tls_min_version"]
+ if !ok {
+ tlsvers = "tls12"
+ }
+
+ tlsConf := &tls.Config{}
+ tlsConf.GetCertificate = cg.getCertificate
+ tlsConf.NextProtos = []string{"h2", "http/1.1"}
+ tlsConf.MinVersion, ok = tlsutil.TLSLookup[tlsvers]
+ if !ok {
+ return nil, nil, nil, fmt.Errorf("'tls_min_version' value %s not supported, please specify one of [tls10,tls11,tls12]", tlsvers)
+ }
+ tlsConf.ClientAuth = tls.RequestClientCert
+
+ if v, ok := config["tls_cipher_suites"]; ok {
+ ciphers, err := tlsutil.ParseCiphers(v)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("invalid value for 'tls_cipher_suites': %v", err)
+ }
+ tlsConf.CipherSuites = ciphers
+ }
+ if v, ok := config["tls_prefer_server_cipher_suites"]; ok {
+ preferServer, err := strconv.ParseBool(v)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("invalid value for 'tls_prefer_server_cipher_suites': %v", err)
+ }
+ tlsConf.PreferServerCipherSuites = preferServer
+ }
+ if v, ok := config["tls_require_and_verify_client_cert"]; ok {
+ requireClient, err := strconv.ParseBool(v)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("invalid value for 'tls_require_and_verify_client_cert': %v", err)
+ }
+ if requireClient {
+ tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
+ }
+ }
+
+ ln = tls.NewListener(ln, tlsConf)
+ props["tls"] = "enabled"
+ return ln, props, cg.reload, nil
+}
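+
+// A sketch of a TLS-enabled listener handled by listenerWrapTLS; the file
+// paths are placeholders:
+//
+//   listener "tcp" {
+//     address         = "0.0.0.0:8200"
+//     tls_cert_file   = "/etc/vault/tls/server.pem"
+//     tls_key_file    = "/etc/vault/tls/server.key"
+//     tls_min_version = "tls12"
+//   }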
+
+type certificateGetter struct {
+ sync.RWMutex
+
+ cert *tls.Certificate
+
+ id string
+}
+
+func (cg *certificateGetter) reload(config map[string]string) error {
+ if config["address"] != cg.id {
+ return nil
+ }
+
+ cert, err := tls.LoadX509KeyPair(config["tls_cert_file"], config["tls_key_file"])
+ if err != nil {
+ return err
+ }
+
+ cg.Lock()
+ defer cg.Unlock()
+
+ cg.cert = &cert
+
+ return nil
+}
+
+func (cg *certificateGetter) getCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+ cg.RLock()
+ defer cg.RUnlock()
+
+ if cg.cert == nil {
+ return nil, fmt.Errorf("nil certificate")
+ }
+
+ return cg.cert, nil
+}
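+
+// On SIGHUP the reload function returned by listenerWrapTLS is invoked with
+// the listener config, re-reading tls_cert_file and tls_key_file from disk;
+// getCertificate then serves the new certificate on subsequent handshakes
+// (exercised by TestServer_ReloadListener).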
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_atlas.go b/vendor/github.com/hashicorp/vault/command/server/listener_atlas.go
new file mode 100644
index 0000000..c000474
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/listener_atlas.go
@@ -0,0 +1,66 @@
+package server
+
+import (
+ "io"
+ "net"
+
+ "github.com/hashicorp/scada-client/scada"
+ "github.com/hashicorp/vault/vault"
+ "github.com/hashicorp/vault/version"
+)
+
+type SCADAListener struct {
+ ln net.Listener
+ scadaProvider *scada.Provider
+}
+
+func (s *SCADAListener) Accept() (net.Conn, error) {
+ return s.ln.Accept()
+}
+
+func (s *SCADAListener) Close() error {
+ s.scadaProvider.Shutdown()
+ return s.ln.Close()
+}
+
+func (s *SCADAListener) Addr() net.Addr {
+ return s.ln.Addr()
+}
+
+func atlasListenerFactory(config map[string]string, logger io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error) {
+ scadaConfig := &scada.Config{
+ Service: "vault",
+ Version: version.GetVersion().VersionNumber(),
+ ResourceType: "vault-cluster",
+ Meta: map[string]string{
+ "node_id": config["node_id"],
+ "cluster_name": config["cluster_name"],
+ },
+ Atlas: scada.AtlasConfig{
+ Endpoint: config["endpoint"],
+ Infrastructure: config["infrastructure"],
+ Token: config["token"],
+ },
+ }
+
+ provider, list, err := scada.NewHTTPProvider(scadaConfig, logger)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ ln := &SCADAListener{
+ ln: list,
+ scadaProvider: provider,
+ }
+
+ props := map[string]string{
+ "addr": "Atlas/SCADA",
+ "infrastructure": scadaConfig.Atlas.Infrastructure,
+ }
+
+ // The outer connection is already TLS-enabled; this is just the listener
+ // that reaches back inside that connection
+ config["tls_disable"] = "1"
+
+ return listenerWrapTLS(ln, props, config)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go b/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go
new file mode 100644
index 0000000..4e5e9b4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go
@@ -0,0 +1,53 @@
+package server
+
+import (
+ "io"
+ "net"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+func tcpListenerFactory(config map[string]string, _ io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error) {
+ bind_proto := "tcp"
+ addr, ok := config["address"]
+ if !ok {
+ addr = "127.0.0.1:8200"
+ }
+
+ // If they've passed 0.0.0.0, we only want to bind on IPv4
+ // rather than golang's dual stack default
+ if strings.HasPrefix(addr, "0.0.0.0:") {
+ bind_proto = "tcp4"
+ }
+
+ ln, err := net.Listen(bind_proto, addr)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ ln = tcpKeepAliveListener{ln.(*net.TCPListener)}
+ props := map[string]string{"addr": addr}
+ return listenerWrapTLS(ln, props, config)
+}
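+
+// For example, address = "0.0.0.0:8200" binds via "tcp4" (IPv4 only),
+// while "[::]:8200" or ":8200" keeps Go's dual-stack default.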
+
+// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
+// connections. It's used by ListenAndServe and ListenAndServeTLS so
+// dead TCP connections (e.g. closing laptop mid-download) eventually
+// go away.
+//
+// This is copied directly from the Go source code.
+type tcpKeepAliveListener struct {
+ *net.TCPListener
+}
+
+func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
+ tc, err := ln.AcceptTCP()
+ if err != nil {
+ return
+ }
+ tc.SetKeepAlive(true)
+ tc.SetKeepAlivePeriod(3 * time.Minute)
+ return tc, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go b/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go
new file mode 100644
index 0000000..7da2033
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go
@@ -0,0 +1,74 @@
+package server
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "os"
+ "testing"
+ "time"
+)
+
+func TestTCPListener(t *testing.T) {
+ ln, _, _, err := tcpListenerFactory(map[string]string{
+ "address": "127.0.0.1:0",
+ "tls_disable": "1",
+ }, nil)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ connFn := func(lnReal net.Listener) (net.Conn, error) {
+ return net.Dial("tcp", ln.Addr().String())
+ }
+
+ testListenerImpl(t, ln, connFn, "")
+}
+
+// TestTCPListener_tls tests that the listener serves TLS using the
+// configured certificate and key
+func TestTCPListener_tls(t *testing.T) {
+ wd, _ := os.Getwd()
+ wd += "/test-fixtures/reload/"
+
+ td, err := ioutil.TempDir("", fmt.Sprintf("vault-test-%d", rand.New(rand.NewSource(time.Now().Unix())).Int63()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(td)
+
+ // Setup initial certs
+ inBytes, _ := ioutil.ReadFile(wd + "reload_ca.pem")
+ certPool := x509.NewCertPool()
+ ok := certPool.AppendCertsFromPEM(inBytes)
+ if !ok {
+ t.Fatal("not ok when appending CA cert")
+ }
+
+ ln, _, _, err := tcpListenerFactory(map[string]string{
+ "address": "127.0.0.1:0",
+ "tls_cert_file": wd + "reload_foo.pem",
+ "tls_key_file": wd + "reload_foo.key",
+ }, nil)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ connFn := func(lnReal net.Listener) (net.Conn, error) {
+ conn, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{
+ RootCAs: certPool,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err = conn.Handshake(); err != nil {
+ return nil, err
+ }
+ return conn, nil
+ }
+
+ testListenerImpl(t, ln, connFn, "foo.example.com")
+}
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_test.go b/vendor/github.com/hashicorp/vault/command/server/listener_test.go
new file mode 100644
index 0000000..e7fb4d3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/listener_test.go
@@ -0,0 +1,64 @@
+package server
+
+import (
+ "bytes"
+ "crypto/tls"
+ "io"
+ "net"
+ "testing"
+)
+
+type testListenerConnFn func(net.Listener) (net.Conn, error)
+
+func testListenerImpl(t *testing.T, ln net.Listener, connFn testListenerConnFn, certName string) {
+ serverCh := make(chan net.Conn, 1)
+ go func() {
+ server, err := ln.Accept()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if certName != "" {
+ tlsConn := server.(*tls.Conn)
+ tlsConn.Handshake()
+ }
+ serverCh <- server
+ }()
+
+ client, err := connFn(ln)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if certName != "" {
+ tlsConn := client.(*tls.Conn)
+ if len(tlsConn.ConnectionState().PeerCertificates) != 1 {
+ t.Fatalf("err: number of certs too long")
+ }
+ peerName := tlsConn.ConnectionState().PeerCertificates[0].Subject.CommonName
+ if peerName != certName {
+ t.Fatalf("err: bad cert name %s, expected %s", peerName, certName)
+ }
+ }
+
+ server := <-serverCh
+ defer client.Close()
+ defer server.Close()
+
+ var buf bytes.Buffer
+ copyCh := make(chan struct{})
+ go func() {
+ io.Copy(&buf, server)
+ close(copyCh)
+ }()
+
+ if _, err := client.Write([]byte("foo")); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ client.Close()
+
+ <-copyCh
+ if buf.String() != "foo" {
+ t.Fatalf("bad: %v", buf.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config-dir/bar.json b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config-dir/bar.json
new file mode 100644
index 0000000..16055e8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config-dir/bar.json
@@ -0,0 +1,11 @@
+{
+ "ui":false,
+
+ "listener": {
+ "tcp": {
+ "address": "127.0.0.1:443"
+ }
+ },
+
+ "max_lease_ttl": "10h"
+}
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json
new file mode 100644
index 0000000..70e7e14
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json
@@ -0,0 +1,27 @@
+{
+ "listener": [{
+ "tcp": {
+ "address": "127.0.0.1:443"
+ }
+ }, {
+ "atlas": {
+ "token": "foobar",
+ "infrastructure": "foo/bar",
+ "endpoint": "https://foo.bar:1111",
+ "node_id": "foo_node"
+ }
+ }],
+ "storage": {
+ "consul": {
+ "foo": "bar",
+ "disable_clustering": "true"
+ }
+ },
+ "telemetry": {
+ "statsite_address": "baz"
+ },
+ "max_lease_ttl": "10h",
+ "default_lease_ttl": "10h",
+ "cluster_name":"testcluster",
+ "ui":true
+}
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json
new file mode 100644
index 0000000..5279d63
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json
@@ -0,0 +1,45 @@
+{
+ "ui":true,
+ "listener":[
+ {
+ "tcp":{
+ "address":"127.0.0.1:443"
+ }
+ },
+ {
+ "tcp":{
+ "address":"127.0.0.1:444"
+ }
+ }
+ ],
+ "storage":{
+ "consul":{
+ "foo":"bar"
+ }
+ },
+ "ha_storage":{
+ "consul":{
+ "bar":"baz",
+ "disable_clustering": "true"
+ }
+ },
+ "cache_size": 45678,
+ "telemetry":{
+ "statsd_address":"bar",
+ "statsite_address":"foo",
+ "disable_hostname":true,
+ "circonus_api_token": "0",
+ "circonus_api_app": "vault",
+ "circonus_api_url": "http://api.circonus.com/v2",
+ "circonus_submission_interval": "10s",
+ "circonus_submission_url": "https://someplace.com/metrics",
+ "circonus_check_id": "0",
+ "circonus_check_force_metric_activation": "true",
+ "circonus_check_instance_id": "node1:vault",
+ "circonus_check_search_tag": "service:vault",
+ "circonus_check_display_name": "node1:vault",
+ "circonus_check_tags": "cat1:tag1,cat2:tag2",
+ "circonus_broker_id": "0",
+ "circonus_broker_select_tag": "dc:sfo"
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_bar.key b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_bar.key
new file mode 100644
index 0000000..10849fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_bar.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAwF7sRAyUiLcd6es6VeaTRUBOusFFGkmKJ5lU351waCJqXFju
+Z6i/SQYNAAnnRgotXSTE1fIPjE2kZNH1hvqE5IpTGgAwy50xpjJrrBBI6e9lyKqj
+7T8gLVNBvtC0cpQi+pGrszEI0ckDQCSZHqi/PAzcpmLUgh2KMrgagT+YlN35KHtl
+/bQ/Fsn+kqykVqNw69n/CDKNKdDHn1qPwiX9q/fTMj3EG6g+3ntKrUOh8V/gHKPz
+q8QGP/wIud2K+tTSorVXr/4zx7xgzlbJkCakzcQQiP6K+paPnDRlE8fK+1gRRyR7
+XCzyp0irUl8G1NjYAR/tVWxiUhlk/jZutb8PpwIDAQABAoIBAEOzJELuindyujxQ
+ZD9G3h1I/GwNCFyv9Mbq10u7BIwhUH0fbwdcA7WXQ4v38ERd4IkfH4aLoZ0m1ewF
+V/sgvxQO+h/0YTfHImny5KGxOXfaoF92bipYROKuojydBmQsbgLwsRRm9UufCl3Q
+g3KewG5JuH112oPQEYq379v8nZ4FxC3Ano1OFBTm9UhHIAX1Dn22kcHOIIw8jCsQ
+zp7TZOW+nwtkS41cBwhvV4VIeL6yse2UgbOfRVRwI7B0OtswS5VgW3wysO2mTDKt
+V/WCmeht1il/6ZogEHgi/mvDCKpj20wQ1EzGnPdFLdiFJFylf0oufQD/7N/uezbC
+is0qJEECgYEA3AE7SeLpe3SZApj2RmE2lcD9/Saj1Y30PznxB7M7hK0sZ1yXEbtS
+Qf894iDDD/Cn3ufA4xk/K52CXgAcqvH/h2geG4pWLYsT1mdWhGftprtOMCIvJvzU
+8uWJzKdOGVMG7R59wNgEpPDZDpBISjexwQsFo3aw1L/H1/Sa8cdY3a0CgYEA39hB
+1oLmGRyE32Q4GF/srG4FqKL1EsbISGDUEYTnaYg2XiM43gu3tC/ikfclk27Jwc2L
+m7cA5FxxaEyfoOgfAizfU/uWTAbx9GoXgWsO0hWSN9+YNq61gc5WKoHyrJ/rfrti
+y5d7k0OCeBxckLqGDuJqICQ0myiz0El6FU8h5SMCgYEAuhigmiNC9JbwRu40g9v/
+XDVfox9oPmBRVpogdC78DYKeqN/9OZaGQiUxp3GnDni2xyqqUm8srCwT9oeJuF/z
+kgpUTV96/hNCuH25BU8UC5Es1jJUSFpdlwjqwx5SRcGhfjnojZMseojwUg1h2MW7
+qls0bc0cTxnaZaYW2qWRWhECgYBrT0cwyQv6GdvxJCBoPwQ9HXmFAKowWC+H0zOX
+Onmd8/jsZEJM4J0uuo4Jn8vZxBDg4eL9wVuiHlcXwzP7dYv4BP8DSechh2rS21Ft
+b59pQ4IXWw+jl1nYYsyYEDgAXaIN3VNder95N7ICVsZhc6n01MI/qlu1zmt1fOQT
+9x2utQKBgHI9SbsfWfbGiu6oLS3+9V1t4dORhj8D8b7z3trvECrD6tPhxoZqtfrH
+4apKr3OKRSXk3K+1K6pkMHJHunspucnA1ChXLhzfNF08BSRJkQDGYuaRLS6VGgab
+JZTl54bGvO1GkszEBE/9QFcqNVtWGMWXnUPwNNv8t//yJT5rvQil
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_bar.pem b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_bar.pem
new file mode 100644
index 0000000..a8217be
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_bar.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDQzCCAiugAwIBAgIULLCz3mZKmg2xy3rWCud0f1zcmBwwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjQ0WhcNMzYw
+MzA1MDEzNzE0WjAaMRgwFgYDVQQDEw9iYXIuZXhhbXBsZS5jb20wggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAXuxEDJSItx3p6zpV5pNFQE66wUUaSYon
+mVTfnXBoImpcWO5nqL9JBg0ACedGCi1dJMTV8g+MTaRk0fWG+oTkilMaADDLnTGm
+MmusEEjp72XIqqPtPyAtU0G+0LRylCL6kauzMQjRyQNAJJkeqL88DNymYtSCHYoy
+uBqBP5iU3fkoe2X9tD8Wyf6SrKRWo3Dr2f8IMo0p0MefWo/CJf2r99MyPcQbqD7e
+e0qtQ6HxX+Aco/OrxAY//Ai53Yr61NKitVev/jPHvGDOVsmQJqTNxBCI/or6lo+c
+NGUTx8r7WBFHJHtcLPKnSKtSXwbU2NgBH+1VbGJSGWT+Nm61vw+nAgMBAAGjgYQw
+gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSVoF8F
+7qbzSryIFrldurAG78LvSjAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl
+vzAgBgNVHREEGTAXgg9iYXIuZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL
+BQADggEBAGmz2N282iT2IaEZvOmzIE4znHGkvoxZmrr/2byq5PskBg9ysyCHfUvw
+SFA8U7jWjezKTnGRUu5blB+yZdjrMtB4AePWyEqtkJwVsZ2SPeP+9V2gNYK4iktP
+UF3aIgBbAbw8rNuGIIB0T4D+6Zyo9Y3MCygs6/N4bRPZgLhewWn1ilklfnl3eqaC
+a+JY1NBuTgCMa28NuC+Hy3mCveqhI8tFNiOthlLdgAEbuQaOuNutAG73utZ2aq6Q
+W4pajFm3lEf5zt7Lo6ZCFtY/Q8jjURJ9e4O7VjXcqIhBM5bSMI6+fgQyOH0SLboj
+RNanJ2bcyF1iPVyPBGzV3dF0ngYzxEY=
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_ca.pem b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_ca.pem
new file mode 100644
index 0000000..72a7444
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_ca.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDNTCCAh2gAwIBAgIUBeVo+Ce2BrdRT1cogKvJLtdOky8wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNTM4WhcNMzYw
+MzA1MDIzNjA4WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAPTQGWPRIOECGeJB6tR/ftvvtioC9f84fY2QdJ5k
+JBupXjPAGYKgS4MGzyT5bz9yY400tCtmh6h7p9tZwHl/TElTugtLQ/8ilMbJTiOM
+SiyaMDPHiMJJYKTjm9bu6bKeU1qPZ0Cryes4rygbqs7w2XPgA2RxNmDh7JdX7/h+
+VB5onBmv8g4WFSayowGyDcJWWCbu5yv6ZdH1bqQjgRzQ5xp17WXNmvlzdp2vate/
+9UqPdA8sdJzW/91Gvmros0o/FnG7c2pULhk22wFqO8t2HRjKb3nuxALEJvqoPvad
+KjpDTaq1L1ZzxcB7wvWyhy/lNLZL7jiNWy0mN1YB0UpSWdECAwEAAaN7MHkwDgYD
+VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHMM2+oX9Orb
+U6BazXcHljJ1mOW/MB8GA1UdIwQYMBaAFHMM2+oX9OrbU6BazXcHljJ1mOW/MBYG
+A1UdEQQPMA2CC2V4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQAp17XsOaT9
+hculRqrFptn3+zkH3HrIckHm+28R5xYT8ASFXFcLFugGizJAXVL5lvsRVRIwCoOX
+Nhi8XSNEFP640VbHcEl81I84bbRIIDS+Yheu6JDZGemTaDYLv1J3D5SHwgoM+nyf
+oTRgotUCIXcwJHmTpWEUkZFKuqBxsoTGzk0jO8wOP6xoJkzxVVG5PvNxs924rxY8
+Y8iaLdDfMeT7Pi0XIliBa/aSp/iqSW8XKyJl5R5vXg9+DOgZUrVzIxObaF5RBl/a
+mJOeklJBdNVzQm5+iMpO42lu0TA9eWtpP+YiUEXU17XDvFeQWOocFbQ1Peo0W895
+XRz2GCwCNyvW
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_foo.key b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_foo.key
new file mode 100644
index 0000000..86e6cce
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_foo.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpgIBAAKCAQEAzNyVieSti9XBb5/celB5u8YKRJv3mQS9A4/X0mqY1ePznt1i
+ilG7OmG0yM2VAk0ceIAQac3Bsn74jxn2cDlrrVniPXcNgYtMtW0kRqNEo4doo4EX
+xZguS9vNBu29useHhif1TGX/pA3dgvaVycUCjzTEVk6qI8UEehMK6gEGZb7nOr0A
+A9nipSqoeHpDLe3a4KVqj1vtlJKUvD2i1MuBuQ130cB1K9rufLCShGu7mEgzEosc
+gr+K3Bf03IejbeVRyIfLtgj1zuvV1katec75UqRA/bsvt5G9JfJqiZ9mwFN0vp3g
+Cr7pdQBSBQ2q4yf9s8CuY5c5w9fl3F8f5QFQoQIDAQABAoIBAQCbCb1qNFRa5ZSV
+I8i6ELlwMDqJHfhOJ9XcIjpVljLAfNlcu3Ld92jYkCU/asaAjVckotbJG9yhd5Io
+yp9E40/oS4P6vGTOS1vsWgMAKoPBtrKsOwCAm+E9q8UIn1fdSS/5ibgM74x+3bds
+a62Em8KKGocUQkhk9a+jq1GxMsFisbHRxEHvClLmDMgGnW3FyGmWwT6yZLPSC0ey
+szmmjt3ouP8cLAOmSjzcQBMmEZpQMCgR6Qckg6nrLQAGzZyTdCd875wbGA57DpWX
+Lssn95+A5EFvr/6b7DkXeIFCrYBFFa+UQN3PWGEQ6Zjmiw4VgV2vO8yX2kCLlUhU
+02bL393ZAoGBAPXPD/0yWINbKUPcRlx/WfWQxfz0bu50ytwIXzVK+pRoAMuNqehK
+BJ6kNzTTBq40u+IZ4f5jbLDulymR+4zSkirLE7CyWFJOLNI/8K4Pf5DJUgNdrZjJ
+LCtP9XRdxiPatQF0NGfdgHlSJh+/CiRJP4AgB17AnB/4z9/M0ZlJGVrzAoGBANVa
+69P3Rp/WPBQv0wx6f0tWppJolWekAHKcDIdQ5HdOZE5CPAYSlTrTUW3uJuqMwU2L
+M0Er2gIPKWIR5X+9r7Fvu9hQW6l2v3xLlcrGPiapp3STJvuMxzhRAmXmu3bZfVn1
+Vn7Vf1jPULHtTFSlNFEvYG5UJmygK9BeyyVO5KMbAoGBAMCyAibLQPg4jrDUDZSV
+gUAwrgUO2ae1hxHWvkxY6vdMUNNByuB+pgB3W4/dnm8Sh/dHsxJpftt1Lqs39ar/
+p/ZEHLt4FCTxg9GOrm7FV4t5RwG8fko36phJpnIC0UFqQltRbYO+8OgqrhhU+u5X
+PaCDe0OcWsf1lYAsYGN6GpZhAoGBAMJ5Ksa9+YEODRs1cIFKUyd/5ztC2xRqOAI/
+3WemQ2nAacuvsfizDZVeMzYpww0+maAuBt0btI719PmwaGmkpDXvK+EDdlmkpOwO
+FY6MXvBs6fdnfjwCWUErDi2GQFAX9Jt/9oSL5JU1+08DhvUM1QA/V/2Y9KFE6kr3
+bOIn5F4LAoGBAKQzH/AThDGhT3hwr4ktmReF3qKxBgxzjVa8veXtkY5VWwyN09iT
+jnTTt6N1CchZoK5WCETjdzNYP7cuBTcV4d3bPNRiJmxXaNVvx3Tlrk98OiffT8Qa
+5DO/Wfb43rNHYXBjU6l0n2zWcQ4PUSSbu0P0bM2JTQPRCqSthXvSHw2P
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_foo.pem b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_foo.pem
new file mode 100644
index 0000000..c8b868b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_foo.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDQzCCAiugAwIBAgIUFVW6i/M+yJUsDrXWgRKO/Dnb+L4wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjA1WhcNMzYw
+MzA1MDEzNjM1WjAaMRgwFgYDVQQDEw9mb28uZXhhbXBsZS5jb20wggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDM3JWJ5K2L1cFvn9x6UHm7xgpEm/eZBL0D
+j9fSapjV4/Oe3WKKUbs6YbTIzZUCTRx4gBBpzcGyfviPGfZwOWutWeI9dw2Bi0y1
+bSRGo0Sjh2ijgRfFmC5L280G7b26x4eGJ/VMZf+kDd2C9pXJxQKPNMRWTqojxQR6
+EwrqAQZlvuc6vQAD2eKlKqh4ekMt7drgpWqPW+2UkpS8PaLUy4G5DXfRwHUr2u58
+sJKEa7uYSDMSixyCv4rcF/Tch6Nt5VHIh8u2CPXO69XWRq15zvlSpED9uy+3kb0l
+8mqJn2bAU3S+neAKvul1AFIFDarjJ/2zwK5jlznD1+XcXx/lAVChAgMBAAGjgYQw
+gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRNJoOJ
+dnazDiuqLhV6truQ4cRe9jAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl
+vzAgBgNVHREEGTAXgg9mb28uZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL
+BQADggEBAHzv67mtbxMWcuMsxCFBN1PJNAyUDZVCB+1gWhk59EySbVg81hWJDCBy
+fl3TKjz3i7wBGAv+C2iTxmwsSJbda22v8JQbuscXIfLFbNALsPzF+J0vxAgJs5Gc
+sDbfJ7EQOIIOVKQhHLYnQoLnigSSPc1kd0JjYyHEBjgIaSuXgRRTBAeqLiBMx0yh
+RKL1lQ+WoBU/9SXUZZkwokqWt5G7khi5qZkNxVXZCm8VGPg0iywf6gGyhI1SU5S2
+oR219S6kA4JY/stw1qne85/EmHmoImHGt08xex3GoU72jKAjsIpqRWopcD/+uene
+Tc9nn3fTQW/Z9fsoJ5iF5OdJnDEswqE=
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/command/server_ha_test.go b/vendor/github.com/hashicorp/vault/command/server_ha_test.go
new file mode 100644
index 0000000..5562191
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server_ha_test.go
@@ -0,0 +1,94 @@
+// +build !race
+
+package command
+
+import (
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/meta"
+ "github.com/mitchellh/cli"
+)
+
+// The following tests have a go-metrics/exp manager race condition
+func TestServer_CommonHA(t *testing.T) {
+ ui := new(cli.MockUi)
+ c := &ServerCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ tmpfile, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("error creating temp dir: %v", err)
+ }
+
+ tmpfile.WriteString(basehcl + consulhcl)
+ tmpfile.Close()
+ defer os.Remove(tmpfile.Name())
+
+ args := []string{"-config", tmpfile.Name(), "-verify-only", "true"}
+
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s\n\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String())
+ }
+
+ if !strings.Contains(ui.OutputWriter.String(), "(HA available)") {
+ t.Fatalf("did not find HA available: %s", ui.OutputWriter.String())
+ }
+}
+
+func TestServer_GoodSeparateHA(t *testing.T) {
+ ui := new(cli.MockUi)
+ c := &ServerCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ tmpfile, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("error creating temp dir: %v", err)
+ }
+
+ tmpfile.WriteString(basehcl + consulhcl + haconsulhcl)
+ tmpfile.Close()
+ defer os.Remove(tmpfile.Name())
+
+ args := []string{"-config", tmpfile.Name(), "-verify-only", "true"}
+
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s\n\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String())
+ }
+
+ if !strings.Contains(ui.OutputWriter.String(), "HA Storage:") {
+ t.Fatalf("did not find HA Storage: %s", ui.OutputWriter.String())
+ }
+}
+
+func TestServer_BadSeparateHA(t *testing.T) {
+ ui := new(cli.MockUi)
+ c := &ServerCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ tmpfile, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("error creating temp dir: %v", err)
+ }
+
+ tmpfile.WriteString(basehcl + consulhcl + badhaconsulhcl)
+ tmpfile.Close()
+ defer os.Remove(tmpfile.Name())
+
+ args := []string{"-config", tmpfile.Name()}
+
+ if code := c.Run(args); code == 0 {
+ t.Fatalf("bad: should have gotten an error on a bad HA config")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/server_test.go b/vendor/github.com/hashicorp/vault/command/server_test.go
new file mode 100644
index 0000000..f95016f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/server_test.go
@@ -0,0 +1,173 @@
+// +build !race
+
+package command
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/meta"
+ "github.com/mitchellh/cli"
+)
+
+var (
+ basehcl = `
+disable_mlock = true
+
+listener "tcp" {
+ address = "127.0.0.1:8200"
+ tls_disable = "true"
+}
+`
+
+ consulhcl = `
+backend "consul" {
+ prefix = "foo/"
+ advertise_addr = "http://127.0.0.1:8200"
+ disable_registration = "true"
+}
+`
+ haconsulhcl = `
+ha_backend "consul" {
+ prefix = "bar/"
+ redirect_addr = "http://127.0.0.1:8200"
+ disable_registration = "true"
+}
+`
+
+ badhaconsulhcl = `
+ha_backend "file" {
+ path = "/dev/null"
+}
+`
+
+ reloadhcl = `
+backend "file" {
+ path = "/dev/null"
+}
+
+disable_mlock = true
+
+listener "tcp" {
+ address = "127.0.0.1:8203"
+ tls_cert_file = "TMPDIR/reload_FILE.pem"
+ tls_key_file = "TMPDIR/reload_FILE.key"
+}
+`
+)
+
+// The following tests have a go-metrics/exp manager race condition
+func TestServer_ReloadListener(t *testing.T) {
+ wd, _ := os.Getwd()
+ wd += "/server/test-fixtures/reload/"
+
+ td, err := ioutil.TempDir("", fmt.Sprintf("vault-test-%d", rand.New(rand.NewSource(time.Now().Unix())).Int63()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(td)
+
+ wg := &sync.WaitGroup{}
+
+ // Setup initial certs
+ inBytes, _ := ioutil.ReadFile(wd + "reload_foo.pem")
+ ioutil.WriteFile(td+"/reload_foo.pem", inBytes, 0777)
+ inBytes, _ = ioutil.ReadFile(wd + "reload_foo.key")
+ ioutil.WriteFile(td+"/reload_foo.key", inBytes, 0777)
+ inBytes, _ = ioutil.ReadFile(wd + "reload_bar.pem")
+ ioutil.WriteFile(td+"/reload_bar.pem", inBytes, 0777)
+ inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key")
+ ioutil.WriteFile(td+"/reload_bar.key", inBytes, 0777)
+
+ relhcl := strings.Replace(strings.Replace(reloadhcl, "TMPDIR", td, -1), "FILE", "foo", -1)
+ ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0777)
+
+ inBytes, _ = ioutil.ReadFile(wd + "reload_ca.pem")
+ certPool := x509.NewCertPool()
+ ok := certPool.AppendCertsFromPEM(inBytes)
+ if !ok {
+ t.Fatal("not ok when appending CA cert")
+ }
+
+ ui := new(cli.MockUi)
+ c := &ServerCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ ShutdownCh: MakeShutdownCh(),
+ SighupCh: MakeSighupCh(),
+ }
+
+ finished := false
+ finishedMutex := sync.Mutex{}
+
+ wg.Add(1)
+ args := []string{"-config", td + "/reload.hcl"}
+ go func() {
+ if code := c.Run(args); code != 0 {
+ t.Error("got a non-zero exit status")
+ }
+ finishedMutex.Lock()
+ finished = true
+ finishedMutex.Unlock()
+ wg.Done()
+ }()
+
+ checkFinished := func() {
+ finishedMutex.Lock()
+ if finished {
+ t.Fatalf(fmt.Sprintf("finished early; relhcl was\n%s\nstdout was\n%s\nstderr was\n%s\n", relhcl, ui.OutputWriter.String(), ui.ErrorWriter.String()))
+ }
+ finishedMutex.Unlock()
+ }
+
+ testCertificateName := func(cn string) error {
+ conn, err := tls.Dial("tcp", "127.0.0.1:8203", &tls.Config{
+ RootCAs: certPool,
+ })
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+ if err = conn.Handshake(); err != nil {
+ return err
+ }
+ servName := conn.ConnectionState().PeerCertificates[0].Subject.CommonName
+ if servName != cn {
+ return fmt.Errorf("expected %s, got %s", cn, servName)
+ }
+ return nil
+ }
+
+ checkFinished()
+ time.Sleep(5 * time.Second)
+ checkFinished()
+
+ if err := testCertificateName("foo.example.com"); err != nil {
+ t.Fatalf("certificate name didn't check out: %s", err)
+ }
+
+ relhcl = strings.Replace(strings.Replace(reloadhcl, "TMPDIR", td, -1), "FILE", "bar", -1)
+ ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0777)
+
+ c.SighupCh <- struct{}{}
+ checkFinished()
+ time.Sleep(2 * time.Second)
+ checkFinished()
+
+ if err := testCertificateName("bar.example.com"); err != nil {
+ t.Fatalf("certificate name didn't check out: %s", err)
+ }
+
+ c.ShutdownCh <- struct{}{}
+
+ wg.Wait()
+}
diff --git a/vendor/github.com/hashicorp/vault/command/ssh.go b/vendor/github.com/hashicorp/vault/command/ssh.go
new file mode 100644
index 0000000..a9aebbe
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/ssh.go
@@ -0,0 +1,330 @@
+package command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "os/exec"
+ "os/user"
+ "strings"
+
+ "github.com/hashicorp/vault/builtin/logical/ssh"
+ "github.com/hashicorp/vault/meta"
+ "github.com/mitchellh/mapstructure"
+)
+
+// SSHCommand is a Command that establishes an SSH connection
+// with the target by generating a dynamic key.
+type SSHCommand struct {
+ meta.Meta
+}
+
+// SSHCredentialResp holds the fields returned when a credential is
+// requested from the SSH backend.
+ KeyType string `mapstructure:"key_type"`
+ Key string `mapstructure:"key"`
+ Username string `mapstructure:"username"`
+ IP string `mapstructure:"ip"`
+ Port string `mapstructure:"port"`
+}
+
+func (c *SSHCommand) Run(args []string) int {
+ var role, mountPoint, format, userKnownHostsFile, strictHostKeyChecking string
+ var noExec bool
+ var sshCmdArgs []string
+ flags := c.Meta.FlagSet("ssh", meta.FlagSetDefault)
+ flags.StringVar(&strictHostKeyChecking, "strict-host-key-checking", "", "")
+ flags.StringVar(&userKnownHostsFile, "user-known-hosts-file", "", "")
+ flags.StringVar(&format, "format", "table", "")
+ flags.StringVar(&role, "role", "", "")
+ flags.StringVar(&mountPoint, "mount-point", "ssh", "")
+ flags.BoolVar(&noExec, "no-exec", false, "")
+
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ // If the flag is already set then it takes precedence. If the flag is not
+ // set, try setting it from the env var.
+ if os.Getenv("VAULT_SSH_STRICT_HOST_KEY_CHECKING") != "" && strictHostKeyChecking == "" {
+ strictHostKeyChecking = os.Getenv("VAULT_SSH_STRICT_HOST_KEY_CHECKING")
+ }
+ // Assign a default value if neither the flag nor the env var is set
+ if strictHostKeyChecking == "" {
+ strictHostKeyChecking = "ask"
+ }
+
+ // If the flag is already set then it takes precedence. If the flag is not
+ // set, try setting it from the env var.
+ if os.Getenv("VAULT_SSH_USER_KNOWN_HOSTS_FILE") != "" && userKnownHostsFile == "" {
+ userKnownHostsFile = os.Getenv("VAULT_SSH_USER_KNOWN_HOSTS_FILE")
+ }
+ // Assign a default value if neither the flag nor the env var is set
+ if userKnownHostsFile == "" {
+ userKnownHostsFile = "~/.ssh/known_hosts"
+ }
+
+ args = flags.Args()
+ if len(args) < 1 {
+ c.Ui.Error("ssh expects at least one argument")
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error initializing client: %v", err))
+ return 1
+ }
+
+ // split the parameter username@ip
+ input := strings.Split(args[0], "@")
+ var username string
+ var ipAddr string
+
+ // If only the IP is mentioned and the username is skipped, assume the
+ // current username. The Vault SSH role's default username could have
+ // been used, but the current username is employed to stay consistent
+ // with the ssh command.
+ if len(input) == 1 {
+ u, err := user.Current()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error fetching username: %v", err))
+ return 1
+ }
+ username = u.Username
+ ipAddr = input[0]
+ } else if len(input) == 2 {
+ username = input[0]
+ ipAddr = input[1]
+ } else {
+ c.Ui.Error(fmt.Sprintf("Invalid parameter: %q", args[0]))
+ return 1
+ }
+
+ // Resolve domain names to an IP address on the client side;
+ // Vault only deals with IP addresses.
+ ip, err := net.ResolveIPAddr("ip", ipAddr)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error resolving IP Address: %v", err))
+ return 1
+ }
+
+ // Credentials are generated only against a registered role. If the user
+ // does not specify a role with the SSH command, the lookup API is used
+ // to fetch all the roles with which this IP is associated. If there is
+ // only one role associated with it, use it to establish the connection.
+ if role == "" {
+ role, err = c.defaultRole(mountPoint, ip.String())
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error choosing role: %v", err))
+ return 1
+ }
+ // Print the chosen default role so that the user knows which role was
+ // used if something doesn't work. If ACLs prevent the user from using
+ // the chosen role, they should see an error message accordingly.
+ c.Ui.Output(fmt.Sprintf("Vault SSH: Role: %q", role))
+ }
+
+ data := map[string]interface{}{
+ "username": username,
+ "ip": ip.String(),
+ }
+
+ keySecret, err := client.SSHWithMountPoint(mountPoint).Credential(role, data)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error getting key for SSH session: %v", err))
+ return 1
+ }
+
+ // If no-exec was chosen, just print out the secret and return.
+ if noExec {
+ return OutputSecret(c.Ui, format, keySecret)
+ }
+
+ // Port comes back as a json.Number which mapstructure doesn't like, so convert it
+ if keySecret.Data["port"] != nil {
+ keySecret.Data["port"] = keySecret.Data["port"].(json.Number).String()
+ }
+ var resp SSHCredentialResp
+ if err := mapstructure.Decode(keySecret.Data, &resp); err != nil {
+ c.Ui.Error(fmt.Sprintf("Error parsing the credential response: %v", err))
+ return 1
+ }
+
+ if resp.KeyType == ssh.KeyTypeDynamic {
+ if len(resp.Key) == 0 {
+ c.Ui.Error(fmt.Sprintf("Invalid key"))
+ return 1
+ }
+ sshDynamicKeyFile, err := ioutil.TempFile("", fmt.Sprintf("vault_ssh_%s_%s_", username, ip.String()))
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error creating temporary file: %v", err))
+ return 1
+ }
+
+ // Ensure that we delete the temporary file
+ defer os.Remove(sshDynamicKeyFile.Name())
+
+ if err = ioutil.WriteFile(sshDynamicKeyFile.Name(),
+ []byte(resp.Key), 0600); err != nil {
+ c.Ui.Error(fmt.Sprintf("Error storing the dynamic key into the temporary file: %v", err))
+ return 1
+ }
+ sshCmdArgs = append(sshCmdArgs, []string{"-i", sshDynamicKeyFile.Name()}...)
+
+ } else if resp.KeyType == ssh.KeyTypeOTP {
+ // Check whether the application 'sshpass' is installed on the client
+ // machine. If it is, use it to automate typing the OTP into the prompt.
+ // Unfortunately, it was not possible to automate this with only the Go
+ // libraries, without a third-party application.
+ // Feel free to try and remove this dependency.
+ sshpassPath, err := exec.LookPath("sshpass")
+ if err == nil {
+ sshCmdArgs = append(sshCmdArgs, []string{"-p", string(resp.Key), "ssh", "-o UserKnownHostsFile=" + userKnownHostsFile, "-o StrictHostKeyChecking=" + strictHostKeyChecking, "-p", resp.Port, username + "@" + ip.String()}...)
+ if len(args) > 1 {
+ sshCmdArgs = append(sshCmdArgs, args[1:]...)
+ }
+ sshCmd := exec.Command(sshpassPath, sshCmdArgs...)
+ sshCmd.Stdin = os.Stdin
+ sshCmd.Stdout = os.Stdout
+ err = sshCmd.Run()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Failed to establish SSH connection: %q", err))
+ }
+ return 0
+ }
+ c.Ui.Output("OTP for the session is " + resp.Key)
+ c.Ui.Output("[Note: Install 'sshpass' to automate typing in OTP]")
+ }
+ sshCmdArgs = append(sshCmdArgs, []string{"-o UserKnownHostsFile=" + userKnownHostsFile, "-o StrictHostKeyChecking=" + strictHostKeyChecking, "-p", resp.Port, username + "@" + ip.String()}...)
+ if len(args) > 1 {
+ sshCmdArgs = append(sshCmdArgs, args[1:]...)
+ }
+
+ sshCmd := exec.Command("ssh", sshCmdArgs...)
+ sshCmd.Stdin = os.Stdin
+ sshCmd.Stdout = os.Stdout
+
+ // Run ssh as a separate command. exec.Command is used instead of the
+ // crypto/ssh package so that the user gets the familiar experience of
+ // connecting to remote hosts with the ssh command. Package crypto/ssh
+ // did not offer a way to establish an independent session like this.
+ err = sshCmd.Run()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error while running ssh command: %q", err))
+ }
+
+ // If the session lasted longer than the lease expiry, the secret might
+ // have been revoked already. If not, revoke it now. Since the key file
+ // is deleted and the user no longer knows the credential, there is no
+ // point in Vault maintaining this secret. A fresh credential is
+ // generated every time the command is run anyway.
+ err = client.Sys().Revoke(keySecret.LeaseID)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error revoking the key: %q", err))
+ }
+
+ return 0
+}
+
+// If the user did not provide a role with which the SSH connection has
+// to be established, and there is only one role associated with the IP,
+// that role is used by default.
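+// For example, the data of a lookup response might look like (illustrative):
+//
+//     {"roles": ["web", "admin"]}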
+func (c *SSHCommand) defaultRole(mountPoint, ip string) (string, error) {
+ data := map[string]interface{}{
+ "ip": ip,
+ }
+ client, err := c.Client()
+ if err != nil {
+ return "", err
+ }
+ secret, err := client.Logical().Write(mountPoint+"/lookup", data)
+ if err != nil {
+ return "", fmt.Errorf("Error finding roles for IP %q: %v", ip, err)
+ }
+ if secret == nil {
+ return "", fmt.Errorf("Empty response when finding roles for IP %q", ip)
+ }
+
+ if secret.Data["roles"] == nil {
+ return "", fmt.Errorf("No matching roles found for IP %q", ip)
+ }
+
+ if len(secret.Data["roles"].([]interface{})) == 1 {
+ return secret.Data["roles"].([]interface{})[0].(string), nil
+ } else {
+ var roleNames string
+ for _, item := range secret.Data["roles"].([]interface{}) {
+ roleNames += item.(string) + ", "
+ }
+ roleNames = strings.TrimRight(roleNames, ", ")
+ return "", fmt.Errorf("Roles:%q. "+`
+ Multiple roles are registered for this IP.
+ Select a role using '-role' option.
+ Note that all roles may not be permitted, based on ACLs.`, roleNames)
+ }
+}
+
+func (c *SSHCommand) Synopsis() string {
+ return "Initiate an SSH session"
+}
+
+func (c *SSHCommand) Help() string {
+ helpText := `
+Usage: vault ssh [options] username@ip
+
+ Establishes an SSH connection with the target machine.
+
+ This command generates a key and uses it to establish an SSH
+ connection with the target machine. This operation requires
+ that the SSH backend is mounted and at least one 'role' is
+ registered with Vault beforehand.
+
+ To set up an SSH backend with one-time passwords, vault-ssh-helper
+ or a compatible agent must be installed on the target machines.
+ See [https://github.com/hashicorp/vault-ssh-agent].
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+SSH Options:
+
+ -role Role to be used to create the key.
+ Each IP is associated with a role. To see the roles
+ associated with an IP, use the "lookup" endpoint. If you are
+ certain that there is only one role associated with the IP,
+ you can skip specifying the role; it will be chosen by
+ default. If there are no roles associated with the IP,
+ register the CIDR block of that IP using the "roles/" endpoint.
+
+ -no-exec Shows the credentials but does not establish a connection.
+
+ -mount-point Mount point of SSH backend. If the backend is mounted at
+ 'ssh', which is the default as well, this parameter can be
+ skipped.
+
+ -format If the no-exec option is enabled, the credentials are
+ printed and no SSH connection is established. The output
+ format can be 'json' or 'table'. JSON output is useful
+ for scripting. Defaults to 'table'.
+
+ -strict-host-key-checking This option corresponds to StrictHostKeyChecking in the SSH configuration.
+ If 'sshpass' is employed for automated login and the host key
+ is not "known" to the client, the 'vault ssh' command will fail.
+ Set this option to "no" to bypass host key checking. Defaults to
+ "ask". Can also be specified with the VAULT_SSH_STRICT_HOST_KEY_CHECKING
+ environment variable.
+
+ -user-known-hosts-file This option corresponds to UserKnownHostsFile in the SSH configuration.
+ Assigns the file to use for storing the host keys. If this option is
+ set to "/dev/null" along with "-strict-host-key-checking=no", both
+ warnings and host key checking can be avoided while establishing the
+ connection. Defaults to "~/.ssh/known_hosts". Can also be specified
+ with the VAULT_SSH_USER_KNOWN_HOSTS_FILE environment variable.
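+
+ Example (illustrative; assumes a role named 'web' has been registered
+ for the target's CIDR block):
+
+ vault ssh -role=web username@203.0.113.10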
+`
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/ssh_test.go b/vendor/github.com/hashicorp/vault/command/ssh_test.go
new file mode 100644
index 0000000..70a58f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/ssh_test.go
@@ -0,0 +1,199 @@
+package command
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "testing"
+
+ logicalssh "github.com/hashicorp/vault/builtin/logical/ssh"
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+const (
+ testCidr = "127.0.0.1/32"
+ testRoleName = "testRoleName"
+ testKey = "testKey"
+ testSharedPrivateKey = `
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAvYvoRcWRxqOim5VZnuM6wHCbLUeiND0yaM1tvOl+Fsrz55DG
+A0OZp4RGAu1Fgr46E1mzxFz1+zY4UbcEExg+u21fpa8YH8sytSWW1FyuD8ICib0A
+/l8slmDMw4BkkGOtSlEqgscpkpv/TWZD1NxJWkPcULk8z6c7TOETn2/H9mL+v2RE
+mbE6NDEwJKfD3MvlpIqCP7idR+86rNBAODjGOGgyUbtFLT+K01XmDRALkV3V/nh+
+GltyjL4c6RU4zG2iRyV5RHlJtkml+UzUMkzr4IQnkCC32CC/wmtoo/IsAprpcHVe
+nkBn3eFQ7uND70p5n6GhN/KOh2j519JFHJyokwIDAQABAoIBAHX7VOvBC3kCN9/x
++aPdup84OE7Z7MvpX6w+WlUhXVugnmsAAVDczhKoUc/WktLLx2huCGhsmKvyVuH+
+MioUiE+vx75gm3qGx5xbtmOfALVMRLopjCnJYf6EaFA0ZeQ+NwowNW7Lu0PHmAU8
+Z3JiX8IwxTz14DU82buDyewO7v+cEr97AnERe3PUcSTDoUXNaoNxjNpEJkKREY6h
+4hAY676RT/GsRcQ8tqe/rnCqPHNd7JGqL+207FK4tJw7daoBjQyijWuB7K5chSal
+oPInylM6b13ASXuOAOT/2uSUBWmFVCZPDCmnZxy2SdnJGbsJAMl7Ma3MUlaGvVI+
+Tfh1aQkCgYEA4JlNOabTb3z42wz6mz+Nz3JRwbawD+PJXOk5JsSnV7DtPtfgkK9y
+6FTQdhnozGWShAvJvc+C4QAihs9AlHXoaBY5bEU7R/8UK/pSqwzam+MmxmhVDV7G
+IMQPV0FteoXTaJSikhZ88mETTegI2mik+zleBpVxvfdhE5TR+lq8Br0CgYEA2AwJ
+CUD5CYUSj09PluR0HHqamWOrJkKPFPwa+5eiTTCzfBBxImYZh7nXnWuoviXC0sg2
+AuvCW+uZ48ygv/D8gcz3j1JfbErKZJuV+TotK9rRtNIF5Ub7qysP7UjyI7zCssVM
+kuDd9LfRXaB/qGAHNkcDA8NxmHW3gpln4CFdSY8CgYANs4xwfercHEWaJ1qKagAe
+rZyrMpffAEhicJ/Z65lB0jtG4CiE6w8ZeUMWUVJQVcnwYD+4YpZbX4S7sJ0B8Ydy
+AhkSr86D/92dKTIt2STk6aCN7gNyQ1vW198PtaAWH1/cO2UHgHOy3ZUt5X/Uwxl9
+cex4flln+1Viumts2GgsCQKBgCJH7psgSyPekK5auFdKEr5+Gc/jB8I/Z3K9+g4X
+5nH3G1PBTCJYLw7hRzw8W/8oALzvddqKzEFHphiGXK94Lqjt/A4q1OdbCrhiE68D
+My21P/dAKB1UYRSs9Y8CNyHCjuZM9jSMJ8vv6vG/SOJPsnVDWVAckAbQDvlTHC9t
+O98zAoGAcbW6uFDkrv0XMCpB9Su3KaNXOR0wzag+WIFQRXCcoTvxVi9iYfUReQPi
+oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F
++B6f4RoPdSXj24JHPg/ioRxjaj094UXJxua2yfkcecGNEuBQHSs=
+-----END RSA PRIVATE KEY-----
+`
+)
+
+var testIP string
+var testPort string
+var testUserName string
+var testAdminUser string
+
+// Starts the server and initializes the server's IP address,
+// port and usernames to be used by the test cases.
+func initTest() {
+ addr, err := vault.StartSSHHostTestServer()
+ if err != nil {
+ panic(fmt.Sprintf("Error starting mock server:%s", err))
+ }
+ input := strings.Split(addr, ":")
+ testIP = input[0]
+ testPort = input[1]
+
+ testUserName = os.Getenv("VAULT_SSHTEST_USER")
+ if len(testUserName) == 0 {
+ panic("VAULT_SSHTEST_USER must be set to the desired user")
+ }
+ testAdminUser = testUserName
+}
+
+// This test is currently broken, so it is temporarily disabled by
+// lowercasing its name so the test runner skips it.
+func testSSH(t *testing.T) {
+ initTest()
+ // Add the SSH backend to the unsealed test core.
+ // This should be done before the unsealed core is created.
+ err := vault.AddTestLogicalBackend("ssh", logicalssh.Factory)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ mountCmd := &MountCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{"-address", addr, "ssh"}
+
+ // Mount the SSH backend
+ if code := mountCmd.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ client, err := mountCmd.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ mounts, err := client.Sys().ListMounts()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Check if SSH backend is mounted or not
+ mount, ok := mounts["ssh/"]
+ if !ok {
+ t.Fatal("should have ssh mount")
+ }
+ if mount.Type != "ssh" {
+ t.Fatal("should have ssh type")
+ }
+
+ writeCmd := &WriteCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ // Create a 'named' key in vault
+ args = []string{
+ "-address", addr,
+ "ssh/keys/" + testKey,
+ "key=" + testSharedPrivateKey,
+ }
+ if code := writeCmd.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Create a role using the named key along with cidr, username and port
+ args = []string{
+ "-address", addr,
+ "ssh/roles/" + testRoleName,
+ "key=" + testKey,
+ "admin_user=" + testUserName,
+ "cidr=" + testCidr,
+ "port=" + testPort,
+ }
+ if code := writeCmd.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ sshCmd := &SSHCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ // Get the dynamic key and establish an SSH connection with the target.
+ // An inline command, when supplied, runs on the target and terminates
+ // the connection. Use whoami as the inline command on the target and
+ // capture the result. Compare the result with the username used to
+ // connect to the target; the test succeeds if they match.
+ args = []string{
+ "-address", addr,
+ "-role=" + testRoleName,
+ testUserName + "@" + testIP,
+ "/usr/bin/whoami",
+ }
+
+ // Create a pipe to capture the result of the inline command run on the target machine.
+ stdout := os.Stdout
+ r, w, err := os.Pipe()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ os.Stdout = w
+ if code := sshCmd.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+ bufChan := make(chan string)
+ go func() {
+ var buf bytes.Buffer
+ io.Copy(&buf, r)
+ bufChan <- buf.String()
+ }()
+ w.Close()
+ os.Stdout = stdout
+ userName := <-bufChan
+ userName = strings.TrimSpace(userName)
+
+ // Compare the username used to connect to the target with the
+ // username reported on the target, thereby verifying successful
+ // execution.
+ if userName != testUserName {
+ t.Fatalf("err: username mismatch")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/status.go b/vendor/github.com/hashicorp/vault/command/status.go
new file mode 100644
index 0000000..3d584c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/status.go
@@ -0,0 +1,114 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/meta"
+)
+
+// StatusCommand is a Command that outputs the status of whether
+// Vault is sealed or not as well as HA information.
+type StatusCommand struct {
+ meta.Meta
+}
+
+func (c *StatusCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("status", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 1
+ }
+
+ sealStatus, err := client.Sys().SealStatus()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error checking seal status: %s", err))
+ return 1
+ }
+
+ outStr := fmt.Sprintf(
+ "Sealed: %v\n"+
+ "Key Shares: %d\n"+
+ "Key Threshold: %d\n"+
+ "Unseal Progress: %d\n"+
+ "Unseal Nonce: %v\n"+
+ "Version: %s",
+ sealStatus.Sealed,
+ sealStatus.N,
+ sealStatus.T,
+ sealStatus.Progress,
+ sealStatus.Nonce,
+ sealStatus.Version)
+
+ if sealStatus.ClusterName != "" && sealStatus.ClusterID != "" {
+ outStr = fmt.Sprintf("%s\nCluster Name: %s\nCluster ID: %s", outStr, sealStatus.ClusterName, sealStatus.ClusterID)
+ }
+
+ c.Ui.Output(outStr)
+
+ // Mask the 'Vault is sealed' error: it means HA is enabled, but we
+ // cannot query for the leader while sealed.
+ leaderStatus, err := client.Sys().Leader()
+ if err != nil && strings.Contains(err.Error(), "Vault is sealed") {
+ leaderStatus = &api.LeaderResponse{HAEnabled: true}
+ err = nil
+ }
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error checking leader status: %s", err))
+ return 1
+ }
+
+ // Output if HA is enabled
+ c.Ui.Output("")
+ c.Ui.Output(fmt.Sprintf("High-Availability Enabled: %v", leaderStatus.HAEnabled))
+ if leaderStatus.HAEnabled {
+ if sealStatus.Sealed {
+ c.Ui.Output("\tMode: sealed")
+ } else {
+ mode := "standby"
+ if leaderStatus.IsSelf {
+ mode = "active"
+ }
+ c.Ui.Output(fmt.Sprintf("\tMode: %s", mode))
+
+ // If the leader address is unknown, display a placeholder instead
+ // of an empty string.
+ if leaderStatus.LeaderAddress == "" {
+ leaderStatus.LeaderAddress = "<none>"
+ }
+ c.Ui.Output(fmt.Sprintf("\tLeader: %s", leaderStatus.LeaderAddress))
+ }
+ }
+
+ if sealStatus.Sealed {
+ return 2
+ }
+
+ return 0
+}
+
+func (c *StatusCommand) Synopsis() string {
+ return "Outputs status of whether Vault is sealed and if HA mode is enabled"
+}
+
+func (c *StatusCommand) Help() string {
+ helpText := `
+Usage: vault status [options]
+
+ Outputs the state of the Vault: sealed or unsealed, and whether HA is enabled.
+
+ This command outputs whether or not the Vault is sealed. The exit
+ code also reflects the seal status (0 unsealed, 2 sealed, 1 error).
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
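+
+// As an aside, the same seal information is available programmatically via the
+// api package used above; an illustrative sketch:
+//
+//     client, err := api.NewClient(api.DefaultConfig())
+//     if err == nil {
+//         if status, err := client.Sys().SealStatus(); err == nil && status.Sealed {
+//             // e.g. prompt for the unseal keys
+//         }
+//     }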
diff --git a/vendor/github.com/hashicorp/vault/command/status_test.go b/vendor/github.com/hashicorp/vault/command/status_test.go
new file mode 100644
index 0000000..92e7f74
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/status_test.go
@@ -0,0 +1,39 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestStatus(t *testing.T) {
+ ui := new(cli.MockUi)
+ c := &StatusCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ core := vault.TestCore(t)
+ keys, _ := vault.TestCoreInit(t, core)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ args := []string{"-address", addr}
+ if code := c.Run(args); code != 2 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ for _, key := range keys {
+ if _, err := core.Unseal(key); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ }
+
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/step-down.go b/vendor/github.com/hashicorp/vault/command/step-down.go
new file mode 100644
index 0000000..be445a8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/step-down.go
@@ -0,0 +1,55 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// StepDownCommand is a Command that causes the Vault node to step down
+// from active duty.
+type StepDownCommand struct {
+ meta.Meta
+}
+
+func (c *StepDownCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("step-down", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ if err := client.Sys().StepDown(); err != nil {
+ c.Ui.Error(fmt.Sprintf("Error stepping down: %s", err))
+ return 1
+ }
+
+ return 0
+}
+
+func (c *StepDownCommand) Synopsis() string {
+ return "Force the Vault node to give up active duty"
+}
+
+func (c *StepDownCommand) Help() string {
+ helpText := `
+Usage: vault step-down [options]
+
+ Force the Vault node to step down from active duty.
+
+ This causes the indicated node to give up active status. Note that while the
+ affected node will have a short delay before attempting to grab the lock
+ again, if no other node grabs the lock beforehand, it is possible for the
+ same node to re-grab the lock and become active again.
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token/helper.go b/vendor/github.com/hashicorp/vault/command/token/helper.go
new file mode 100644
index 0000000..db068be
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token/helper.go
@@ -0,0 +1,14 @@
+package token
+
+// TokenHelper is an interface that contains basic operations that must be
+// implemented by a token helper
+type TokenHelper interface {
+ // Path displays a backend-specific path; for the internal helper this
+ // is the location of the token stored on disk; for the external helper
+ // this is the location of the binary being invoked
+ Path() string
+
+ Erase() error
+ Get() (string, error)
+ Store(string) error
+}
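+
+// As an illustrative sketch (not part of Vault itself), a minimal in-memory
+// implementation satisfying this interface could look like:
+//
+//     type memoryTokenHelper struct{ token string }
+//
+//     func (h *memoryTokenHelper) Path() string         { return "(in-memory)" }
+//     func (h *memoryTokenHelper) Erase() error         { h.token = ""; return nil }
+//     func (h *memoryTokenHelper) Get() (string, error) { return h.token, nil }
+//     func (h *memoryTokenHelper) Store(s string) error { h.token = s; return nil }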
diff --git a/vendor/github.com/hashicorp/vault/command/token/helper_external.go b/vendor/github.com/hashicorp/vault/command/token/helper_external.go
new file mode 100644
index 0000000..40de9bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token/helper_external.go
@@ -0,0 +1,131 @@
+package token
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+// ExternalTokenHelperPath takes the configured path to a helper and expands it to
+// a full absolute path that can be executed. As of 0.5, the default token
+// helper is internal, to avoid problems running in dev mode (see GH-850 and
+// GH-783), so special assumptions of prepending "vault token-" no longer
+// apply.
+//
+// As an additional result, only absolute paths are now allowed. Looking in the
+// path or a current directory for an arbitrary executable could allow someone
+// to switch the expected binary for one further up the path (or in the current
+// directory), potentially opening up execution of an arbitrary binary.
+func ExternalTokenHelperPath(path string) (string, error) {
+ if !filepath.IsAbs(path) {
+ var err error
+ path, err = filepath.Abs(path)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ if _, err := os.Stat(path); err != nil {
+ return "", fmt.Errorf("unknown error getting the external helper path")
+ }
+
+ return path, nil
+}
+
+// ExternalTokenHelper is the struct that has all the logic for storing and retrieving
+// tokens from the token helper. The API for the helpers is simple: the
+// BinaryPath is executed within a shell with environment Env. The last argument
+// appended will be the operation, which is:
+//
+// * "get" - Read the value of the token and write it to stdout.
+// * "store" - Store the value of the token which is on stdin. Output
+// nothing.
+// * "erase" - Erase the contents stored. Output nothing.
+//
+// Any errors can be written on stdout. If the helper exits with a non-zero
+// exit code then the stderr will be made part of the error value.
+type ExternalTokenHelper struct {
+ BinaryPath string
+ Env []string
+}
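+
+// For example, an illustrative sketch of storing and retrieving a token via
+// this helper (the helper binary path here is hypothetical):
+//
+//     h := &ExternalTokenHelper{
+//         BinaryPath: "/usr/local/bin/my-token-helper",
+//         Env:        os.Environ(),
+//     }
+//     if err := h.Store("the-token"); err != nil {
+//         // handle the error
+//     }
+//     token, err := h.Get()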
+
+// Erase deletes the contents from the helper.
+func (h *ExternalTokenHelper) Erase() error {
+ cmd, err := h.cmd("erase")
+ if err != nil {
+ return fmt.Errorf("Error: %s", err)
+ }
+ if output, err := cmd.CombinedOutput(); err != nil {
+ return fmt.Errorf(
+ "Error: %s\n\n%s", err, string(output))
+ }
+ return nil
+}
+
+// Get gets the token value from the helper.
+func (h *ExternalTokenHelper) Get() (string, error) {
+ var buf, stderr bytes.Buffer
+ cmd, err := h.cmd("get")
+ if err != nil {
+ return "", fmt.Errorf("Error: %s", err)
+ }
+ cmd.Stdout = &buf
+ cmd.Stderr = &stderr
+ if err := cmd.Run(); err != nil {
+ return "", fmt.Errorf(
+ "Error: %s\n\n%s", err, stderr.String())
+ }
+
+ return buf.String(), nil
+}
+
+// Store stores the token value into the helper.
+func (h *ExternalTokenHelper) Store(v string) error {
+ buf := bytes.NewBufferString(v)
+ cmd, err := h.cmd("store")
+ if err != nil {
+ return fmt.Errorf("Error: %s", err)
+ }
+ cmd.Stdin = buf
+ if output, err := cmd.CombinedOutput(); err != nil {
+ return fmt.Errorf(
+ "Error: %s\n\n%s", err, string(output))
+ }
+
+ return nil
+}
+
+func (h *ExternalTokenHelper) Path() string {
+ return h.BinaryPath
+}
+
+func (h *ExternalTokenHelper) cmd(op string) (*exec.Cmd, error) {
+ script := strings.Replace(h.BinaryPath, "\\", "\\\\", -1) + " " + op
+ cmd, err := ExecScript(script)
+ if err != nil {
+ return nil, err
+ }
+ cmd.Env = h.Env
+ return cmd, nil
+}
+
+// ExecScript returns a command to execute a script
+func ExecScript(script string) (*exec.Cmd, error) {
+ var shell, flag string
+ if runtime.GOOS == "windows" {
+ shell = "cmd"
+ flag = "/C"
+ } else {
+ shell = "/bin/sh"
+ flag = "-c"
+ }
+ if other := os.Getenv("SHELL"); other != "" {
+ shell = other
+ }
+ cmd := exec.Command(shell, flag, script)
+ return cmd, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token/helper_external_test.go b/vendor/github.com/hashicorp/vault/command/token/helper_external_test.go
new file mode 100644
index 0000000..b49dd93
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token/helper_external_test.go
@@ -0,0 +1,138 @@
+package token
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func TestExternalTokenHelperPath(t *testing.T) {
+ cases := map[string]string{}
+
+ unixCases := map[string]string{
+ "/foo": "/foo",
+ }
+ windowsCases := map[string]string{
+ "C:/foo": "C:/foo",
+ `C:\Program Files`: `C:\Program Files`,
+ }
+
+ var runtimeCases map[string]string
+ if runtime.GOOS == "windows" {
+ runtimeCases = windowsCases
+ } else {
+ runtimeCases = unixCases
+ }
+
+ for k, v := range runtimeCases {
+ cases[k] = v
+ }
+
+ // We don't expect those to actually exist, so we expect an error. For now,
+ // I'm commenting out the rest of this code as we don't have real external
+ // helpers to test with and the os.Stat will fail with our fake test cases.
+ /*
+ for k, v := range cases {
+ actual, err := ExternalTokenHelperPath(k)
+ if err != nil {
+ t.Fatalf("error getting external helper path: %v", err)
+ }
+ if actual != v {
+ t.Fatalf(
+ "input: %s, expected: %s, got: %s",
+ k, v, actual)
+ }
+ }
+ */
+}
+
+func TestExternalTokenHelper(t *testing.T) {
+ Test(t, testExternalTokenHelper(t))
+}
+
+func testExternalTokenHelper(t *testing.T) *ExternalTokenHelper {
+ return &ExternalTokenHelper{BinaryPath: helperPath("helper"), Env: helperEnv()}
+}
+
+func helperPath(s ...string) string {
+ cs := []string{"-test.run=TestExternalTokenHelperProcess", "--"}
+ cs = append(cs, s...)
+ return fmt.Sprintf(
+ "%s %s",
+ os.Args[0],
+ strings.Join(cs, " "))
+}
+
+func helperEnv() []string {
+ var env []string
+
+ tf, err := ioutil.TempFile("", "vault")
+ if err != nil {
+ panic(err)
+ }
+ tf.Close()
+
+ env = append(env, "GO_HELPER_PATH="+tf.Name(), "GO_WANT_HELPER_PROCESS=1")
+ return env
+}
+
+// This is not a real test. This is just a helper process kicked off by tests.
+func TestExternalTokenHelperProcess(*testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+
+ defer os.Exit(0)
+
+ args := os.Args
+ for len(args) > 0 {
+ if args[0] == "--" {
+ args = args[1:]
+ break
+ }
+
+ args = args[1:]
+ }
+
+ if len(args) == 0 {
+ fmt.Fprintf(os.Stderr, "No command\n")
+ os.Exit(2)
+ }
+
+ cmd, args := args[0], args[1:]
+ switch cmd {
+ case "helper":
+ path := os.Getenv("GO_HELPER_PATH")
+
+ switch args[0] {
+ case "erase":
+ os.Remove(path)
+ case "get":
+ f, err := os.Open(path)
+ if os.IsNotExist(err) {
+ return
+ }
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Err: %s\n", err)
+ os.Exit(1)
+ }
+ defer f.Close()
+ io.Copy(os.Stdout, f)
+ case "store":
+ f, err := os.Create(path)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Err: %s\n", err)
+ os.Exit(1)
+ }
+ defer f.Close()
+ io.Copy(f, os.Stdin)
+ }
+ default:
+ fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd)
+ os.Exit(2)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token/helper_internal.go b/vendor/github.com/hashicorp/vault/command/token/helper_internal.go
new file mode 100644
index 0000000..89793cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token/helper_internal.go
@@ -0,0 +1,78 @@
+package token
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/mitchellh/go-homedir"
+)
+
+// InternalTokenHelper fulfills the TokenHelper interface when no external
+// token-helper is configured, and avoids shelling out
+type InternalTokenHelper struct {
+ tokenPath string
+}
+
+// populateTokenPath figures out the token path using homedir to get the user's
+// home directory
+func (i *InternalTokenHelper) populateTokenPath() {
+ homePath, err := homedir.Dir()
+ if err != nil {
+ panic(fmt.Errorf("error getting user's home directory: %v", err))
+ }
+ i.tokenPath = homePath + "/.vault-token"
+}
+
+func (i *InternalTokenHelper) Path() string {
+ return i.tokenPath
+}
+
+// Get gets the value of the stored token, if any
+func (i *InternalTokenHelper) Get() (string, error) {
+ i.populateTokenPath()
+ f, err := os.Open(i.tokenPath)
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ buf := bytes.NewBuffer(nil)
+ if _, err := io.Copy(buf, f); err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(buf.String()), nil
+}
+
+// Store stores the value of the token to the file
+func (i *InternalTokenHelper) Store(input string) error {
+ i.populateTokenPath()
+ f, err := os.OpenFile(i.tokenPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ buf := bytes.NewBufferString(input)
+ if _, err := io.Copy(f, buf); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Erase erases the value of the token
+func (i *InternalTokenHelper) Erase() error {
+ i.populateTokenPath()
+ if err := os.Remove(i.tokenPath); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token/helper_internal_test.go b/vendor/github.com/hashicorp/vault/command/token/helper_internal_test.go
new file mode 100644
index 0000000..4ac527c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token/helper_internal_test.go
@@ -0,0 +1,11 @@
+package token
+
+import (
+ "testing"
+)
+
+// TestCommand re-uses the existing Test function to ensure proper behavior of
+// the internal token helper
+func TestCommand(t *testing.T) {
+ Test(t, &InternalTokenHelper{})
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token/testing.go b/vendor/github.com/hashicorp/vault/command/token/testing.go
new file mode 100644
index 0000000..725f127
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token/testing.go
@@ -0,0 +1,78 @@
+package token
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/mitchellh/cli"
+)
+
+// Test is a public function that can be used in other tests to
+// test that a helper is functioning properly.
+func Test(t *testing.T, h TokenHelper) {
+ if err := h.Store("foo"); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ v, err := h.Get()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if v != "foo" {
+ t.Fatalf("bad: %#v", v)
+ }
+
+ if err := h.Erase(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ v, err = h.Get()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if v != "" {
+ t.Fatalf("bad: %#v", v)
+ }
+}
+
+// TestProcess is used to re-execute this test in order to use it as the
+// helper process. For this to work, the TestExternalTokenHelperProcess function must
+// exist.
+func TestProcess(t *testing.T, s ...string) {
+ h := &ExternalTokenHelper{BinaryPath: TestProcessPath(t, s...)}
+ Test(t, h)
+}
+
+// TestProcessPath returns the path to the test process.
+func TestProcessPath(t *testing.T, s ...string) string {
+ cs := []string{"-test.run=TestExternalTokenHelperProcess", "--", "GO_WANT_HELPER_PROCESS"}
+ cs = append(cs, s...)
+ return fmt.Sprintf(
+ "%s %s",
+ os.Args[0],
+ strings.Join(cs, " "))
+}
+
+// TestExternalTokenHelperProcessCLI can be called to implement TestExternalTokenHelperProcess
+// for TestProcess that just executes a CLI command.
+func TestExternalTokenHelperProcessCLI(t *testing.T, cmd cli.Command) {
+ args := os.Args
+ for len(args) > 0 {
+ if args[0] == "--" {
+ args = args[1:]
+ break
+ }
+
+ args = args[1:]
+ }
+ if len(args) == 0 || args[0] != "GO_WANT_HELPER_PROCESS" {
+ return
+ }
+ args = args[1:]
+
+ os.Exit(cmd.Run(args))
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token_create.go b/vendor/github.com/hashicorp/vault/command/token_create.go
new file mode 100644
index 0000000..f8d8c59
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token_create.go
@@ -0,0 +1,176 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/flag-kv"
+ "github.com/hashicorp/vault/helper/flag-slice"
+ "github.com/hashicorp/vault/meta"
+)
+
+// TokenCreateCommand is a Command that creates a new auth token.
+type TokenCreateCommand struct {
+ meta.Meta
+}
+
+func (c *TokenCreateCommand) Run(args []string) int {
+ var format string
+ var id, displayName, lease, ttl, explicitMaxTTL, period, role string
+ var orphan, noDefaultPolicy, renewable bool
+ var metadata map[string]string
+ var numUses int
+ var policies []string
+ flags := c.Meta.FlagSet("mount", meta.FlagSetDefault)
+ flags.StringVar(&format, "format", "table", "")
+ flags.StringVar(&displayName, "display-name", "", "")
+ flags.StringVar(&id, "id", "", "")
+ flags.StringVar(&lease, "lease", "", "")
+ flags.StringVar(&ttl, "ttl", "", "")
+ flags.StringVar(&explicitMaxTTL, "explicit-max-ttl", "", "")
+ flags.StringVar(&period, "period", "", "")
+ flags.StringVar(&role, "role", "", "")
+ flags.BoolVar(&orphan, "orphan", false, "")
+ flags.BoolVar(&renewable, "renewable", true, "")
+ flags.BoolVar(&noDefaultPolicy, "no-default-policy", false, "")
+ flags.IntVar(&numUses, "use-limit", 0, "")
+ flags.Var((*kvFlag.Flag)(&metadata), "metadata", "")
+ flags.Var((*sliceflag.StringFlag)(&policies), "policy", "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 0 {
+ flags.Usage()
+ c.Ui.Error("\ntoken-create expects no arguments")
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ if ttl == "" {
+ ttl = lease
+ }
+
+ tcr := &api.TokenCreateRequest{
+ ID: id,
+ Policies: policies,
+ Metadata: metadata,
+ TTL: ttl,
+ NoParent: orphan,
+ NoDefaultPolicy: noDefaultPolicy,
+ DisplayName: displayName,
+ NumUses: numUses,
+ Renewable: new(bool),
+ ExplicitMaxTTL: explicitMaxTTL,
+ Period: period,
+ }
+ *tcr.Renewable = renewable
+
+ var secret *api.Secret
+ if role != "" {
+ secret, err = client.Auth().Token().CreateWithRole(tcr, role)
+ } else {
+ secret, err = client.Auth().Token().Create(tcr)
+ }
+
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error creating token: %s", err))
+ return 2
+ }
+
+ return OutputSecret(c.Ui, format, secret)
+}
+
+func (c *TokenCreateCommand) Synopsis() string {
+ return "Create a new auth token"
+}
+
+func (c *TokenCreateCommand) Help() string {
+ helpText := `
+Usage: vault token-create [options]
+
+ Create a new auth token.
+
+ This command creates a new token that can be used for authentication.
+ This token will be created as a child of your token. The created token
+ will inherit your policies, or can be assigned a subset of your policies.
+
+ A lease can also be associated with the token. If a lease is not associated
+ with the token, then it cannot be renewed. If a lease is associated with
+ the token, it will expire after that amount of time unless it is renewed.
+
+ Metadata associated with the token (specified with "-metadata") is
+ written to the audit log when the token is used.
+
+ If a role is specified, the role may override parameters specified here.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Token Options:
+
+ -id="7699125c-d8...." The token value that clients will use to authenticate
+ with Vault. If not provided this defaults to a 36
+ character UUID. A root token is required to specify
+ the ID of a token.
+
+ -display-name="name" A display name to associate with this token. This
+ is a non-security sensitive value used to help
+ identify created secrets, i.e. prefixes.
+
+ -ttl="1h" Initial TTL to associate with the token; renewals can
+ extend this value.
+
+ -explicit-max-ttl="1h" An explicit maximum lifetime for the token. Unlike
+ normal token TTLs, which can be renewed up until the
+ maximum TTL set on the auth/token mount or the system
+ configuration file, this lifetime is a hard limit set
+ on the token itself and cannot be exceeded.
+
+ -period="1h" If specified, the token will be periodic; it will
+ have no maximum TTL (unless an "explicit-max-ttl" is
+ also set) but every renewal will use the given
+ period. Requires a root/sudo token to use.
+
+ -renewable=true Whether or not the token is renewable to extend its
+ TTL up to Vault's configured maximum TTL for tokens.
+ This defaults to true; set to false to disable
+ renewal of this token.
+
+ -metadata="key=value" Metadata to associate with the token. This shows
+ up in the audit log. This can be specified multiple
+ times.
+
+ -orphan If specified, the token will have no parent. This
+ prevents the new token from being revoked with
+ your token. Requires a root/sudo token to use.
+
+ -no-default-policy If specified, the token will not have the "default"
+ policy included in its policy set.
+
+ -policy="name" Policy to associate with this token. This can be
+ specified multiple times.
+
+ -use-limit=5 The number of times this token can be used until
+ it is automatically revoked.
+
+ -format=table The format for output. By default it is a whitespace-
+ delimited table. This can also be json or yaml.
+
+ -role=name If set, the token will be created against the named
+ role. The role may override other parameters. This
+ requires the client to have permissions on the
+ appropriate endpoint (auth/token/create/).
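+
+ Example (illustrative):
+
+ vault token-create -policy="readonly" -ttl="1h" -metadata="purpose=ci"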
+`
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token_create_test.go b/vendor/github.com/hashicorp/vault/command/token_create_test.go
new file mode 100644
index 0000000..9db2a26
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token_create_test.go
@@ -0,0 +1,38 @@
+package command
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestTokenCreate(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &TokenCreateCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Ensure we get lease info
+ output := ui.OutputWriter.String()
+ if !strings.Contains(output, "token_duration") {
+ t.Fatalf("bad: %#v", output)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token_lookup.go b/vendor/github.com/hashicorp/vault/command/token_lookup.go
new file mode 100644
index 0000000..c1c62ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token_lookup.go
@@ -0,0 +1,100 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/meta"
+)
+
+// TokenLookupCommand is a Command that outputs details about the
+// token provided.
+type TokenLookupCommand struct {
+ meta.Meta
+}
+
+func (c *TokenLookupCommand) Run(args []string) int {
+ var format string
+ var accessor bool
+ flags := c.Meta.FlagSet("token-lookup", meta.FlagSetDefault)
+ flags.BoolVar(&accessor, "accessor", false, "")
+ flags.StringVar(&format, "format", "table", "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) > 1 {
+ flags.Usage()
+ c.Ui.Error("\ntoken-lookup expects at most one argument")
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "error initializing client: %s", err))
+ return 2
+ }
+
+ var secret *api.Secret
+ switch {
+ case !accessor && len(args) == 0:
+ secret, err = client.Auth().Token().LookupSelf()
+ case !accessor && len(args) == 1:
+ secret, err = client.Auth().Token().Lookup(args[0])
+ case accessor && len(args) == 1:
+ secret, err = client.Auth().Token().LookupAccessor(args[0])
+ default:
+ // This happens only when the accessor flag is set and no argument is passed
+ c.Ui.Error("token-lookup expects an argument when the accessor flag is set")
+ return 1
+ }
+
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "error looking up token: %s", err))
+ return 1
+ }
+ return OutputSecret(c.Ui, format, secret)
+}
+
+func doTokenLookup(args []string, client *api.Client) (*api.Secret, error) {
+ if len(args) == 0 {
+ return client.Auth().Token().LookupSelf()
+ }
+
+ token := args[0]
+ return client.Auth().Token().Lookup(token)
+}
+
+func (c *TokenLookupCommand) Synopsis() string {
+ return "Display information about the specified token"
+}
+
+func (c *TokenLookupCommand) Help() string {
+ helpText := `
+Usage: vault token-lookup [options] [token|accessor]
+
+ Displays information about the specified token. If no token is specified, the
+ operation is performed on the currently authenticated token, i.e., lookup-self.
+ Information about the token can also be retrieved using the token accessor via
+ the '-accessor' flag.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Token Lookup Options:
+ -accessor A boolean flag; if set, the argument is treated as an accessor of the
+ token. Note that when this flag is set, the response will not contain
+ the token ID. The accessor is only meant for looking up the token's
+ properties (and for revocation via the '/auth/token/revoke-accessor/'
+ endpoint).
+
+ -format=table The format for output. By default it is a whitespace-
+ delimited table. This can also be json or yaml.
+
+`
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token_lookup_test.go b/vendor/github.com/hashicorp/vault/command/token_lookup_test.go
new file mode 100644
index 0000000..143b944
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token_lookup_test.go
@@ -0,0 +1,124 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestTokenLookupAccessor(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &TokenLookupCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+ args := []string{
+ "-address", addr,
+ }
+ c.Run(args)
+
+ // Create a new token for us to use
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+ Lease: "1h",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Enable the accessor flag
+ args = append(args, "-accessor")
+
+ // Expect failure if no argument is passed when accessor flag is set
+ code := c.Run(args)
+ if code == 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Add token accessor as arg
+ args = append(args, resp.Auth.Accessor)
+ code = c.Run(args)
+ if code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestTokenLookupSelf(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &TokenLookupCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+
+ // Run it against itself
+ code := c.Run(args)
+
+ // Verify it worked
+ if code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestTokenLookup(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &TokenLookupCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+ // Run it once for client
+ c.Run(args)
+
+ // Create a new token for us to use
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+ Lease: "1h",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Add token as arg for real test and run it
+ args = append(args, resp.Auth.ClientToken)
+ code := c.Run(args)
+
+ // Verify it worked
+ if code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token_renew.go b/vendor/github.com/hashicorp/vault/command/token_renew.go
new file mode 100644
index 0000000..8ec1a55
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token_renew.go
@@ -0,0 +1,115 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/meta"
+)
+
+// TokenRenewCommand is a Command that renews auth tokens.
+type TokenRenewCommand struct {
+ meta.Meta
+}
+
+func (c *TokenRenewCommand) Run(args []string) int {
+ var format, increment string
+ flags := c.Meta.FlagSet("token-renew", meta.FlagSetDefault)
+ flags.StringVar(&format, "format", "table", "")
+ flags.StringVar(&increment, "increment", "", "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) > 2 {
+ flags.Usage()
+ c.Ui.Error("\ntoken-renew expects at most two arguments")
+ return 1
+ }
+
+ var token string
+ if len(args) > 0 {
+ token = args[0]
+ }
+
+ var inc int
+ // If both are specified, prefer the positional argument
+ if len(args) == 2 {
+ increment = args[1]
+ }
+ if increment != "" {
+ dur, err := parseutil.ParseDurationSecond(increment)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Invalid increment: %s", err))
+ return 1
+ }
+
+ inc = int(dur / time.Second)
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ // If the given token is the same as the client's, use renew-self instead
+ // as this is far more likely to be allowed via policy
+ var secret *api.Secret
+ if token == "" {
+ secret, err = client.Auth().Token().RenewSelf(inc)
+ } else {
+ secret, err = client.Auth().Token().Renew(token, inc)
+ }
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error renewing token: %s", err))
+ return 1
+ }
+
+ return OutputSecret(c.Ui, format, secret)
+}
+
+func (c *TokenRenewCommand) Synopsis() string {
+ return "Renew an auth token if there is an associated lease"
+}
+
+func (c *TokenRenewCommand) Help() string {
+ helpText := `
+Usage: vault token-renew [options] [token] [increment]
+
+ Renew an auth token, extending the amount of time it can be used. If a token
+ is given to the command, '/auth/token/renew' will be called with the given
+ token; otherwise, '/auth/token/renew-self' will be called with the client
+ token.
+
+ This command is similar to "renew", but "renew" is only for leases; this
+ command is only for tokens.
+
+ An optional increment can be given to request a certain number of seconds by
+ which to extend the lease. This request is advisory; Vault may not adhere to
+ it at all. If a token is passed on the command line, the increment can be
+ passed as well; otherwise it must be supplied via the '-increment' flag.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Token Renew Options:
+
+ -increment=3600 The desired increment. If not supplied, Vault will
+ use the default TTL. If supplied, it may still be
+ ignored. This can be submitted as an integer number
+ of seconds or a string duration (e.g. "72h").
+
+ -format=table The format for output. By default it is a whitespace-
+ delimited table. This can also be json or yaml.
+
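+ Example (illustrative), renewing a token by value with a 72h increment:
+
+ vault token-renew <token> 72h
+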
+`
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token_renew_test.go b/vendor/github.com/hashicorp/vault/command/token_renew_test.go
new file mode 100644
index 0000000..270ee7e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token_renew_test.go
@@ -0,0 +1,177 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestTokenRenew(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &TokenRenewCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+
+ // Run it once for client
+ c.Run(args)
+
+ // Create a token
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+ Lease: "1h",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Renew, passing in the token
+ args = append(args, resp.Auth.ClientToken)
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestTokenRenewWithIncrement(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &TokenRenewCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+
+ // Run it once for client
+ c.Run(args)
+
+ // Create a token
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+ Lease: "1h",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Renew, passing in the token
+ args = append(args, resp.Auth.ClientToken)
+ args = append(args, "72h")
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestTokenRenewSelf(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &TokenRenewCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+
+ // Run it once for client
+ c.Run(args)
+
+ // Create a token
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+ Lease: "1h",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatal("returned client token is empty")
+ }
+
+ c.Meta.ClientToken = resp.Auth.ClientToken
+
+ // Renew using the self endpoint
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestTokenRenewSelfWithIncrement(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &TokenRenewCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+
+ // Run it once for client
+ c.Run(args)
+
+ // Create a token
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+ Lease: "1h",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatal("returned client token is empty")
+ }
+
+ c.Meta.ClientToken = resp.Auth.ClientToken
+
+ args = append(args, "-increment=72h")
+ // Renew using the self endpoint
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token_revoke.go b/vendor/github.com/hashicorp/vault/command/token_revoke.go
new file mode 100644
index 0000000..6e4105d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token_revoke.go
@@ -0,0 +1,138 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// TokenRevokeCommand is a Command that revokes auth tokens.
+type TokenRevokeCommand struct {
+ meta.Meta
+}
+
+func (c *TokenRevokeCommand) Run(args []string) int {
+ var mode string
+ var accessor bool
+ var self bool
+ var token string
+ flags := c.Meta.FlagSet("token-revoke", meta.FlagSetDefault)
+ flags.BoolVar(&accessor, "accessor", false, "")
+ flags.BoolVar(&self, "self", false, "")
+ flags.StringVar(&mode, "mode", "", "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ switch {
+ case len(args) == 1 && !self:
+ token = args[0]
+ case len(args) != 0 && self:
+ flags.Usage()
+ c.Ui.Error("\ntoken-revoke expects no arguments when revoking self")
+ return 1
+ case len(args) != 1 && !self:
+ flags.Usage()
+ c.Ui.Error("\ntoken-revoke expects one argument or the 'self' flag")
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ var fn func(string) error
+ // Dispatch based on the combination of the 'accessor', 'self' and 'mode' flags;
+ // invalid combinations produce an error
+ switch {
+ case !accessor && self && mode == "":
+ fn = client.Auth().Token().RevokeSelf
+ case !accessor && !self && mode == "":
+ fn = client.Auth().Token().RevokeTree
+ case !accessor && !self && mode == "orphan":
+ fn = client.Auth().Token().RevokeOrphan
+ case !accessor && !self && mode == "path":
+ fn = client.Sys().RevokePrefix
+ case accessor && !self && mode == "":
+ fn = client.Auth().Token().RevokeAccessor
+ case accessor && self:
+ c.Ui.Error("token-revoke cannot be run on self when 'accessor' flag is set")
+ return 1
+ case self && mode != "":
+ c.Ui.Error("token-revoke cannot be run on self when 'mode' flag is set")
+ return 1
+ case accessor && mode == "orphan":
+ c.Ui.Error("token-revoke cannot be run for 'orphan' mode when 'accessor' flag is set")
+ return 1
+ case accessor && mode == "path":
+ c.Ui.Error("token-revoke cannot be run for 'path' mode when 'accessor' flag is set")
+ return 1
+ }
+
+ if err := fn(token); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error revoking token: %s", err))
+ return 2
+ }
+
+ c.Ui.Output("Success! Token revoked if it existed.")
+ return 0
+}
+
+func (c *TokenRevokeCommand) Synopsis() string {
+ return "Revoke one or more auth tokens"
+}
+
+func (c *TokenRevokeCommand) Help() string {
+ helpText := `
+Usage: vault token-revoke [options] [token|accessor]
+
+ Revoke one or more auth tokens.
+
+ This command revokes auth tokens. Use the "revoke" command for
+ revoking secrets.
+
+ Depending on the flags used, auth tokens can be revoked in multiple ways
+ depending on the "-mode" flag:
+
+ * Without any value, the token specified and all of its children
+ will be revoked.
+
+ * With the "orphan" value, only the specific token will be revoked.
+ All of its children will be orphaned.
+
+ * With the "path" value, tokens created from the given auth path
+ prefix will be deleted, along with all their children. In this case
+ the "token" arg above is actually a "path". This mode does *not*
+ work with token values or parts of token values.
+
+ A token can also be revoked using its accessor by setting the '-accessor'
+ flag. Note that when the '-accessor' flag is set, '-mode' must not be set
+ to 'orphan' or 'path', because revocation via an accessor always revokes
+ the token along with its child tokens.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Token Options:
+
+ -accessor A boolean flag; if set, the argument is treated as an accessor of the
+ token. Note that an accessor can also be used to look up the token's
+ properties via the '/auth/token/lookup-accessor/' endpoint.
+ The accessor is useful when the token ID is not known.
+
+ -self A boolean flag; if set, the operation is performed on the currently
+ authenticated token, i.e., revoke-self.
+
+ -mode=value The type of revocation to do. See the documentation
+ above for more information.
+
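+ Examples (illustrative):
+
+ vault token-revoke <token> # revoke the token and its children
+ vault token-revoke -mode=orphan <token> # revoke only the token, orphaning children
+ vault token-revoke -accessor <accessor> # revoke via the token's accessor
+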
+`
+ return strings.TrimSpace(helpText)
+}
diff --git a/vendor/github.com/hashicorp/vault/command/token_revoke_test.go b/vendor/github.com/hashicorp/vault/command/token_revoke_test.go
new file mode 100644
index 0000000..7265a10
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/token_revoke_test.go
@@ -0,0 +1,102 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestTokenRevokeAccessor(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &TokenRevokeCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+
+ // Run it once for client
+ c.Run(args)
+
+ // Create a token
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ resp, err := client.Auth().Token().Create(nil)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Treat the argument as accessor
+ args = append(args, "-accessor")
+ if code := c.Run(args); code == 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Verify it worked with proper accessor
+ args1 := append(args, resp.Auth.Accessor)
+ if code := c.Run(args1); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Fail if mode is set to 'orphan' when accessor is set
+ args2 := append(args, "-mode=\"orphan\"")
+ if code := c.Run(args2); code == 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Fail if mode is set to 'path' when accessor is set
+ args3 := append(args, "-mode=\"path\"")
+ if code := c.Run(args3); code == 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
+
+func TestTokenRevoke(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &TokenRevokeCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+
+ // Run it once for client
+ c.Run(args)
+
+ // Create a token
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ resp, err := client.Auth().Token().Create(nil)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Verify it worked
+ args = append(args, resp.Auth.ClientToken)
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/unmount.go b/vendor/github.com/hashicorp/vault/command/unmount.go
new file mode 100644
index 0000000..b04e532
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/unmount.go
@@ -0,0 +1,67 @@
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/meta"
+)
+
+// UnmountCommand is a Command that unmounts a mounted secret backend.
+type UnmountCommand struct {
+ meta.Meta
+}
+
+func (c *UnmountCommand) Run(args []string) int {
+ flags := c.Meta.FlagSet("mount", meta.FlagSetDefault)
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) != 1 {
+ flags.Usage()
+ c.Ui.Error("\nunmount expects one argument: the path to unmount")
+ return 1
+ }
+
+ path := args[0]
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ if err := client.Sys().Unmount(path); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Unmount error: %s", err))
+ return 2
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "Successfully unmounted '%s' if it was mounted", path))
+
+ return 0
+}
+
+func (c *UnmountCommand) Synopsis() string {
+ return "Unmount a secret backend"
+}
+
+func (c *UnmountCommand) Help() string {
+ helpText := `
+Usage: vault unmount [options] path
+
+ Unmount a secret backend.
+
+ This command unmounts a secret backend. All the secrets created
+ by this backend will be revoked and its Vault data will be deleted.
+
+General Options:
+` + meta.GeneralOptionsUsage()
+ return strings.TrimSpace(helpText)
+}
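
The command is a thin wrapper over the sys API; the same operation through the api client directly, as a minimal sketch assuming VAULT_ADDR and VAULT_TOKEN are set:

package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Mirrors the command above: the server revokes the backend's
	// secrets and deletes its storage under the given mount path.
	if err := client.Sys().Unmount("secret"); err != nil {
		log.Fatal(err)
	}
	log.Println("unmounted 'secret' (if it was mounted)")
}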
diff --git a/vendor/github.com/hashicorp/vault/command/unmount_test.go b/vendor/github.com/hashicorp/vault/command/unmount_test.go
new file mode 100644
index 0000000..1af5ef8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/unmount_test.go
@@ -0,0 +1,47 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestUnmount(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &UnmountCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "secret",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ mounts, err := client.Sys().ListMounts()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ _, ok := mounts["secret/"]
+ if ok {
+ t.Fatal("should not have mount")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/unseal.go b/vendor/github.com/hashicorp/vault/command/unseal.go
new file mode 100644
index 0000000..2dfb947
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/unseal.go
@@ -0,0 +1,128 @@
+package command
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/password"
+ "github.com/hashicorp/vault/meta"
+)
+
+// UnsealCommand is a Command that unseals the vault.
+type UnsealCommand struct {
+ meta.Meta
+
+ // Key can be used to pre-seed the key. If it is set, the user will not
+ // be prompted for it via the `password` helper.
+ Key string
+}
+
+func (c *UnsealCommand) Run(args []string) int {
+ var reset bool
+ flags := c.Meta.FlagSet("unseal", meta.FlagSetDefault)
+ flags.BoolVar(&reset, "reset", false, "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ sealStatus, err := client.Sys().SealStatus()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error checking seal status: %s", err))
+ return 2
+ }
+
+ if !sealStatus.Sealed {
+ c.Ui.Output("Vault is already unsealed.")
+ return 0
+ }
+
+ args = flags.Args()
+ if reset {
+ sealStatus, err = client.Sys().ResetUnsealProcess()
+ } else {
+ value := c.Key
+ if len(args) > 0 {
+ value = args[0]
+ }
+ if value == "" {
+ fmt.Printf("Key (will be hidden): ")
+ value, err = password.Read(os.Stdin)
+ fmt.Printf("\n")
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error attempting to ask for password. The raw error message\n"+
+ "is shown below, but the most common reason for this error is\n"+
+ "that you attempted to pipe a value into unseal or you're\n"+
+ "executing `vault unseal` from outside of a terminal.\n\n"+
+ "You should use `vault unseal` from a terminal for maximum\n"+
+ "security. If this isn't an option, the unseal key can be passed\n"+
+ "in using the first parameter.\n\n"+
+ "Raw error: %s", err))
+ return 1
+ }
+ }
+ sealStatus, err = client.Sys().Unseal(strings.TrimSpace(value))
+ }
+
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error: %s", err))
+ return 1
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "Sealed: %v\n"+
+ "Key Shares: %d\n"+
+ "Key Threshold: %d\n"+
+ "Unseal Progress: %d\n"+
+ "Unseal Nonce: %v",
+ sealStatus.Sealed,
+ sealStatus.N,
+ sealStatus.T,
+ sealStatus.Progress,
+ sealStatus.Nonce,
+ ))
+
+ return 0
+}
+
+func (c *UnsealCommand) Synopsis() string {
+ return "Unseals the Vault server"
+}
+
+func (c *UnsealCommand) Help() string {
+ helpText := `
+Usage: vault unseal [options] [key]
+
+ Unseal the vault by entering a portion of the master key. Once all
+ portions are entered, the vault will be unsealed.
+
+ Every Vault server initially starts as sealed. It cannot perform any
+ operation except unsealing until it is unsealed. Secrets cannot be accessed
+ in any way until the vault is unsealed. This command allows you to enter
+ a portion of the master key to unseal the vault.
+
+ The unseal key can be specified via the command line, but this is
+ not recommended. The key may then live in your terminal history. This
+ only exists to assist in scripting.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Unseal Options:
+
+ -reset Reset the unsealing process by throwing away
+ the key shares entered so far.
+
+`
+ return strings.TrimSpace(helpText)
+}
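
The unseal loop above translates naturally to the api client. A minimal sketch, assuming the hex-encoded key shares are supplied by operators out of band:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

// unsealAll submits key shares until the server reports it is unsealed.
func unsealAll(client *api.Client, keys []string) error {
	for _, key := range keys {
		status, err := client.Sys().Unseal(key)
		if err != nil {
			return err
		}
		if !status.Sealed {
			return nil
		}
		fmt.Printf("progress: %d of %d shares\n", status.Progress, status.T)
	}
	return fmt.Errorf("still sealed after submitting all available shares")
}

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder shares; real values come from the init process.
	shares := []string{"<share-1>", "<share-2>", "<share-3>"}
	if err := unsealAll(client, shares); err != nil {
		log.Fatal(err)
	}
}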
diff --git a/vendor/github.com/hashicorp/vault/command/unseal_test.go b/vendor/github.com/hashicorp/vault/command/unseal_test.go
new file mode 100644
index 0000000..699fdd8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/unseal_test.go
@@ -0,0 +1,72 @@
+package command
+
+import (
+ "encoding/hex"
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestUnseal(t *testing.T) {
+ core := vault.TestCore(t)
+ keys, _ := vault.TestCoreInit(t, core)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+
+ for _, key := range keys {
+ c := &UnsealCommand{
+ Key: hex.EncodeToString(key),
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ args := []string{"-address", addr}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+ }
+
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+}
+
+func TestUnseal_arg(t *testing.T) {
+ core := vault.TestCore(t)
+ keys, _ := vault.TestCoreInit(t, core)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+
+ for _, key := range keys {
+ c := &UnsealCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ },
+ }
+
+ args := []string{"-address", addr, hex.EncodeToString(key)}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+ }
+
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/unwrap.go b/vendor/github.com/hashicorp/vault/command/unwrap.go
new file mode 100644
index 0000000..5a21920
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/unwrap.go
@@ -0,0 +1,104 @@
+package command
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/meta"
+)
+
+// UnwrapCommand is a Command that behaves like ReadCommand but specifically
+// for unwrapping cubbyhole-wrapped secrets
+type UnwrapCommand struct {
+ meta.Meta
+}
+
+func (c *UnwrapCommand) Run(args []string) int {
+ var format string
+ var field string
+ var err error
+ var secret *api.Secret
+ var flags *flag.FlagSet
+ flags = c.Meta.FlagSet("unwrap", meta.FlagSetDefault)
+ flags.StringVar(&format, "format", "table", "")
+ flags.StringVar(&field, "field", "", "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ var tokenID string
+
+ args = flags.Args()
+ switch len(args) {
+ case 0:
+ case 1:
+ tokenID = args[0]
+ default:
+ c.Ui.Error("unwrap expects zero or one argument (the ID of the wrapping token)")
+ flags.Usage()
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ secret, err = client.Logical().Unwrap(tokenID)
+ if err != nil {
+ c.Ui.Error(err.Error())
+ return 1
+ }
+ if secret == nil {
+ c.Ui.Error("Server gave empty response or secret returned was empty")
+ return 1
+ }
+
+ // Handle single field output
+ if field != "" {
+ return PrintRawField(c.Ui, secret, field)
+ }
+
+ // Check if the original was a list response and format as a list if so
+ if secret.Data != nil &&
+ len(secret.Data) == 1 &&
+ secret.Data["keys"] != nil {
+ _, ok := secret.Data["keys"].([]interface{})
+ if ok {
+ return OutputList(c.Ui, format, secret)
+ }
+ }
+ return OutputSecret(c.Ui, format, secret)
+}
+
+func (c *UnwrapCommand) Synopsis() string {
+ return "Unwrap a wrapped secret"
+}
+
+func (c *UnwrapCommand) Help() string {
+ helpText := `
+Usage: vault unwrap [options]
+
+ Unwrap a wrapped secret.
+
+ Unwraps the data wrapped by the given token ID. The returned result is the
+ same as a 'read' operation on a non-wrapped secret.
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Read Options:
+
+ -format=table The format for output. By default it is a whitespace-
+ delimited table. This can also be json or yaml.
+
+ -field=field If included, the raw value of the specified field
+ will be output to stdout.
+
+`
+ return strings.TrimSpace(helpText)
+}
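
Programmatically, the heavy lifting is the single Unwrap call shown in Run above; a minimal sketch with a placeholder wrapping token:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder: a single-use wrapping token received out of band.
	wrappingToken := "<wrapping-token>"

	secret, err := client.Logical().Unwrap(wrappingToken)
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		log.Fatal("server gave an empty response")
	}
	fmt.Printf("unwrapped data: %#v\n", secret.Data)
}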
diff --git a/vendor/github.com/hashicorp/vault/command/unwrap_test.go b/vendor/github.com/hashicorp/vault/command/unwrap_test.go
new file mode 100644
index 0000000..e5dc0bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/unwrap_test.go
@@ -0,0 +1,107 @@
+package command
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestUnwrap(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &UnwrapCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "-field", "zip",
+ }
+
+ // Run once so the client is set up; ignore errors
+ c.Run(args)
+
+ // Get the client so we can write data
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ wrapLookupFunc := func(method, path string) string {
+ if method == "GET" && path == "secret/foo" {
+ return "60s"
+ }
+ if method == "LIST" && path == "secret" {
+ return "60s"
+ }
+ return ""
+ }
+ client.SetWrappingLookupFunc(wrapLookupFunc)
+
+ data := map[string]interface{}{"zip": "zap"}
+ if _, err := client.Logical().Write("secret/foo", data); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ outer, err := client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if outer == nil {
+ t.Fatal("outer response was nil")
+ }
+ if outer.WrapInfo == nil {
+ t.Fatalf("outer wrapinfo was nil, response was %#v", *outer)
+ }
+
+ args = append(args, outer.WrapInfo.Token)
+
+ // Run the read
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ output := ui.OutputWriter.String()
+ if output != "zap\n" {
+ t.Fatalf("unexpectd output:\n%s", output)
+ }
+
+ // Now test with list handling, specifically that it will be called with
+ // the list output formatter
+ ui.OutputWriter.Reset()
+
+ outer, err = client.Logical().List("secret")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if outer == nil {
+ t.Fatal("outer response was nil")
+ }
+ if outer.WrapInfo == nil {
+ t.Fatalf("outer wrapinfo was nil, response was %#v", *outer)
+ }
+
+ args = []string{
+ "-address", addr,
+ outer.WrapInfo.Token,
+ }
+ // Run the read
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ output = ui.OutputWriter.String()
+ if strings.TrimSpace(output) != "Keys\n----\nfoo" {
+ t.Fatalf("unexpected output:\n%s", output)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/util.go b/vendor/github.com/hashicorp/vault/command/util.go
new file mode 100644
index 0000000..0ec3916
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/util.go
@@ -0,0 +1,92 @@
+package command
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "time"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/command/token"
+ "github.com/mitchellh/cli"
+)
+
+// DefaultTokenHelper returns the token helper that is configured for Vault.
+func DefaultTokenHelper() (token.TokenHelper, error) {
+ config, err := LoadConfig("")
+ if err != nil {
+ return nil, err
+ }
+
+ path := config.TokenHelper
+ if path == "" {
+ return &token.InternalTokenHelper{}, nil
+ }
+
+ path, err = token.ExternalTokenHelperPath(path)
+ if err != nil {
+ return nil, err
+ }
+ return &token.ExternalTokenHelper{BinaryPath: path}, nil
+}
+
+func PrintRawField(ui cli.Ui, secret *api.Secret, field string) int {
+ var val interface{}
+ switch {
+ case secret.Auth != nil:
+ switch field {
+ case "token":
+ val = secret.Auth.ClientToken
+ case "token_accessor":
+ val = secret.Auth.Accessor
+ case "token_duration":
+ val = secret.Auth.LeaseDuration
+ case "token_renewable":
+ val = secret.Auth.Renewable
+ case "token_policies":
+ val = secret.Auth.Policies
+ default:
+ val = secret.Data[field]
+ }
+
+ case secret.WrapInfo != nil:
+ switch field {
+ case "wrapping_token":
+ val = secret.WrapInfo.Token
+ case "wrapping_token_ttl":
+ val = secret.WrapInfo.TTL
+ case "wrapping_token_creation_time":
+ val = secret.WrapInfo.CreationTime.Format(time.RFC3339Nano)
+ case "wrapped_accessor":
+ val = secret.WrapInfo.WrappedAccessor
+ default:
+ val = secret.Data[field]
+ }
+
+ default:
+ switch field {
+ case "refresh_interval":
+ val = secret.LeaseDuration
+ default:
+ val = secret.Data[field]
+ }
+ }
+
+ if val != nil {
+ // c.Ui.Output() appends a trailing newline, which in this case is
+ // not desired. Since the Vault CLI currently only uses BasicUi,
+ // which writes to standard output, os.Stdout is used here to
+ // print the message directly. If mitchellh/cli exposes a method
+ // to print without the newline, this check can be removed.
+ if reflect.TypeOf(ui).String() == "*cli.BasicUi" {
+ fmt.Fprintf(os.Stdout, "%v", val)
+ } else {
+ ui.Output(fmt.Sprintf("%v", val))
+ }
+ return 0
+ } else {
+ ui.Error(fmt.Sprintf(
+ "Field %s not present in secret", field))
+ return 1
+ }
+}
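
A minimal sketch of how PrintRawField is meant to be called from within this package; the exampleRawField helper is hypothetical and for illustration only:

package command

import (
	"os"

	"github.com/hashicorp/vault/api"
	"github.com/mitchellh/cli"
)

// exampleRawField is a hypothetical caller: with a *cli.BasicUi the
// value is written without a trailing newline, so the output can be
// captured cleanly in shell scripts.
func exampleRawField(secret *api.Secret) int {
	ui := &cli.BasicUi{Writer: os.Stdout, ErrorWriter: os.Stderr}
	return PrintRawField(ui, secret, "token")
}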
diff --git a/vendor/github.com/hashicorp/vault/command/version.go b/vendor/github.com/hashicorp/vault/command/version.go
new file mode 100644
index 0000000..4665436
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/version.go
@@ -0,0 +1,29 @@
+package command
+
+import (
+ "github.com/hashicorp/vault/version"
+ "github.com/mitchellh/cli"
+)
+
+// VersionCommand is a Command implementation that prints the version.
+type VersionCommand struct {
+ VersionInfo *version.VersionInfo
+ Ui cli.Ui
+}
+
+func (c *VersionCommand) Help() string {
+ return ""
+}
+
+func (c *VersionCommand) Run(_ []string) int {
+ out := c.VersionInfo.FullVersionNumber(true)
+ if version.CgoEnabled {
+ out += " (cgo)"
+ }
+ c.Ui.Output(out)
+ return 0
+}
+
+func (c *VersionCommand) Synopsis() string {
+ return "Prints the Vault version"
+}
diff --git a/vendor/github.com/hashicorp/vault/command/version_test.go b/vendor/github.com/hashicorp/vault/command/version_test.go
new file mode 100644
index 0000000..2a64569
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/version_test.go
@@ -0,0 +1,11 @@
+package command
+
+import (
+ "testing"
+
+ "github.com/mitchellh/cli"
+)
+
+func TestVersionCommand_implements(t *testing.T) {
+ var _ cli.Command = &VersionCommand{}
+}
diff --git a/vendor/github.com/hashicorp/vault/command/wrapping_test.go b/vendor/github.com/hashicorp/vault/command/wrapping_test.go
new file mode 100644
index 0000000..a380cfc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/wrapping_test.go
@@ -0,0 +1,109 @@
+package command
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestWrapping_Env(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &TokenLookupCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ }
+ // Run it once for client
+ c.Run(args)
+
+ // Create a new token for us to use
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+ Lease: "1h",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ prevWrapTTLEnv := os.Getenv(api.EnvVaultWrapTTL)
+ os.Setenv(api.EnvVaultWrapTTL, "5s")
+ defer func() {
+ os.Setenv(api.EnvVaultWrapTTL, prevWrapTTLEnv)
+ }()
+
+ // Now when we do a lookup-self the response should be wrapped
+ args = append(args, resp.Auth.ClientToken)
+
+ resp, err = client.Auth().Token().LookupSelf()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp == nil {
+ t.Fatal("nil response")
+ }
+ if resp.WrapInfo == nil {
+ t.Fatal("nil wrap info")
+ }
+ if resp.WrapInfo.Token == "" || resp.WrapInfo.TTL != 5 {
+ t.Fatal("did not get token or ttl wrong")
+ }
+}
+
+func TestWrapping_Flag(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &TokenLookupCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "-wrap-ttl", "5s",
+ }
+ // Run it once for client
+ c.Run(args)
+
+ // Create a new token for us to use
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+ Lease: "1h",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp == nil {
+ t.Fatal("nil response")
+ }
+ if resp.WrapInfo == nil {
+ t.Fatal("nil wrap info")
+ }
+ if resp.WrapInfo.Token == "" || resp.WrapInfo.TTL != 5 {
+ t.Fatal("did not get token or ttl wrong")
+ }
+}
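
The per-request wrapping behavior exercised above can also be driven entirely from client code via SetWrappingLookupFunc; a minimal sketch:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Wrap only GETs of secret/foo; all other requests stay unwrapped.
	client.SetWrappingLookupFunc(func(operation, path string) string {
		if operation == "GET" && path == "secret/foo" {
			return "5m"
		}
		return ""
	})

	resp, err := client.Logical().Read("secret/foo")
	if err != nil {
		log.Fatal(err)
	}
	if resp != nil && resp.WrapInfo != nil {
		fmt.Println("wrapping token:", resp.WrapInfo.Token)
	}
}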
diff --git a/vendor/github.com/hashicorp/vault/command/write.go b/vendor/github.com/hashicorp/vault/command/write.go
new file mode 100644
index 0000000..0614f9b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/write.go
@@ -0,0 +1,135 @@
+package command
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/kv-builder"
+ "github.com/hashicorp/vault/meta"
+)
+
+// WriteCommand is a Command that puts data into the Vault.
+type WriteCommand struct {
+ meta.Meta
+
+ // The fields below can be overwritten for tests
+ testStdin io.Reader
+}
+
+func (c *WriteCommand) Run(args []string) int {
+ var field, format string
+ var force bool
+ flags := c.Meta.FlagSet("write", meta.FlagSetDefault)
+ flags.StringVar(&format, "format", "table", "")
+ flags.StringVar(&field, "field", "", "")
+ flags.BoolVar(&force, "force", false, "")
+ flags.BoolVar(&force, "f", false, "")
+ flags.Usage = func() { c.Ui.Error(c.Help()) }
+ if err := flags.Parse(args); err != nil {
+ return 1
+ }
+
+ args = flags.Args()
+ if len(args) < 2 && !force {
+ c.Ui.Error("write expects at least two arguments; use -f to perform the write anyways")
+ flags.Usage()
+ return 1
+ }
+
+ path := args[0]
+ if path[0] == '/' {
+ path = path[1:]
+ }
+
+ data, err := c.parseData(args[1:])
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error loading data: %s", err))
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error initializing client: %s", err))
+ return 2
+ }
+
+ secret, err := client.Logical().Write(path, data)
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error writing data to %s: %s", path, err))
+ return 1
+ }
+
+ if secret == nil {
+ // Don't output anything if people aren't using the "human" output
+ if format == "table" {
+ c.Ui.Output(fmt.Sprintf("Success! Data written to: %s", path))
+ }
+ return 0
+ }
+
+ // Handle single field output
+ if field != "" {
+ return PrintRawField(c.Ui, secret, field)
+ }
+
+ return OutputSecret(c.Ui, format, secret)
+}
+
+func (c *WriteCommand) parseData(args []string) (map[string]interface{}, error) {
+ var stdin io.Reader = os.Stdin
+ if c.testStdin != nil {
+ stdin = c.testStdin
+ }
+
+ builder := &kvbuilder.Builder{Stdin: stdin}
+ if err := builder.Add(args...); err != nil {
+ return nil, err
+ }
+
+ return builder.Map(), nil
+}
+
+func (c *WriteCommand) Synopsis() string {
+ return "Write secrets or configuration into Vault"
+}
+
+func (c *WriteCommand) Help() string {
+ helpText := `
+Usage: vault write [options] path [data]
+
+ Write data (secrets or configuration) into Vault.
+
+ Write sends data into Vault at the given path. The behavior of the write is
+ determined by the backend at the given path. For example, writing to
+ "aws/policy/ops" will create an "ops" IAM policy for the AWS backend
+ (configuration), but writing to "consul/foo" will write a value directly into
+ Consul at that key. Check the documentation of the logical backend you're
+ using for more information on key structure.
+
+ Data is sent via additional arguments in "key=value" pairs. If value begins
+ with an "@", then it is loaded from a file. Write expects data in the file to
+ be in JSON format. If you want to start the value with a literal "@", then
+ prefix the "@" with a slash: "\@".
+
+General Options:
+` + meta.GeneralOptionsUsage() + `
+Write Options:
+
+ -f | -force Force the write to continue without any data values
+ specified. This allows writing to keys that do not
+ need or expect any fields to be specified.
+
+ -format=table The format for output. By default it is a whitespace-
+ delimited table. This can also be json or yaml.
+
+ -field=field If included, the raw value of the specified field
+ will be output to stdout.
+
+`
+ return strings.TrimSpace(helpText)
+}
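
The CLI examples in the help text map one-to-one onto Logical().Write; a minimal programmatic sketch:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent to: vault write secret/foo value=bar
	data := map[string]interface{}{"value": "bar"}
	if _, err := client.Logical().Write("secret/foo", data); err != nil {
		log.Fatal(err)
	}
	fmt.Println("Success! Data written to: secret/foo")
}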
diff --git a/vendor/github.com/hashicorp/vault/command/write_test.go b/vendor/github.com/hashicorp/vault/command/write_test.go
new file mode 100644
index 0000000..786bbc3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/command/write_test.go
@@ -0,0 +1,272 @@
+package command
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/vault"
+ "github.com/mitchellh/cli"
+)
+
+func TestWrite(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &WriteCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "secret/foo",
+ "value=bar",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ resp, err := client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if resp.Data["value"] != "bar" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestWrite_arbitrary(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ stdinR, stdinW := io.Pipe()
+ ui := new(cli.MockUi)
+ c := &WriteCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+
+ testStdin: stdinR,
+ }
+
+ go func() {
+ stdinW.Write([]byte(`{"foo":"bar"}`))
+ stdinW.Close()
+ }()
+
+ args := []string{
+ "-address", addr,
+ "secret/foo",
+ "-",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ resp, err := client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if resp.Data["foo"] != "bar" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestWrite_escaped(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &WriteCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "secret/foo",
+ "value=\\@bar",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ resp, err := client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if resp.Data["value"] != "@bar" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestWrite_file(t *testing.T) {
+ tf, err := ioutil.TempFile("", "vault")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ tf.Write([]byte(`{"foo":"bar"}`))
+ tf.Close()
+ defer os.Remove(tf.Name())
+
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &WriteCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "secret/foo",
+ "@" + tf.Name(),
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ resp, err := client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if resp.Data["foo"] != "bar" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestWrite_fileValue(t *testing.T) {
+ tf, err := ioutil.TempFile("", "vault")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ tf.Write([]byte("foo"))
+ tf.Close()
+ defer os.Remove(tf.Name())
+
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &WriteCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "secret/foo",
+ "value=@" + tf.Name(),
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ resp, err := client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if resp.Data["value"] != "foo" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestWrite_Output(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &WriteCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "auth/token/create",
+ "display_name=foo",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+ if !strings.Contains(ui.OutputWriter.String(), "Key") {
+ t.Fatalf("bad: %s", ui.OutputWriter.String())
+ }
+}
+
+func TestWrite_force(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &WriteCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "-force",
+ "sys/rotate",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go b/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go
new file mode 100644
index 0000000..7399a5c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go
@@ -0,0 +1,84 @@
+package awsutil
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/session"
+)
+
+type CredentialsConfig struct {
+ // The access key if static credentials are being used
+ AccessKey string
+
+ // The secret key if static credentials are being used
+ SecretKey string
+
+ // The session token if it is being used
+ SessionToken string
+
+ // If specified, the region will be provided to the config of the
+ // EC2RoleProvider's client. This may be useful if you want to e.g. reuse
+ // the client elsewhere.
+ Region string
+
+ // The filename for the shared credentials provider, if being used
+ Filename string
+
+ // The profile for the shared credentials provider, if being used
+ Profile string
+
+ // The http.Client to use, or nil for the client to use its default
+ HTTPClient *http.Client
+}
+
+func (c *CredentialsConfig) GenerateCredentialChain() (*credentials.Credentials, error) {
+ var providers []credentials.Provider
+
+ switch {
+ case c.AccessKey != "" && c.SecretKey != "":
+ // Add the static credential provider
+ providers = append(providers, &credentials.StaticProvider{
+ Value: credentials.Value{
+ AccessKeyID: c.AccessKey,
+ SecretAccessKey: c.SecretKey,
+ SessionToken: c.SessionToken,
+ }})
+ case c.AccessKey == "" && c.SecretKey == "":
+ // Attempt to get credentials from the IAM instance role below
+
+ default: // Have one or the other but not both and not neither
+ return nil, fmt.Errorf(
+ "static AWS client credentials haven't been properly configured (the access key or secret key were provided but not both)")
+ }
+
+ // Add the environment credential provider
+ providers = append(providers, &credentials.EnvProvider{})
+
+ // Add the shared credentials provider
+ providers = append(providers, &credentials.SharedCredentialsProvider{
+ Filename: c.Filename,
+ Profile: c.Profile,
+ })
+
+ // Add the instance metadata role provider
+ providers = append(providers, &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.New(session.New(&aws.Config{
+ Region: aws.String(c.Region),
+ HTTPClient: c.HTTPClient,
+ })),
+ ExpiryWindow: 15,
+ })
+
+ // Create the credentials required to access the API.
+ creds := credentials.NewChainCredentials(providers)
+ if creds == nil {
+ return nil, fmt.Errorf("could not compile valid credential providers from static config, environemnt, shared, or instance metadata")
+ }
+
+ return creds, nil
+}
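
A minimal usage sketch of the chain above; the region is a placeholder, and leaving the static keys empty simply falls through to the env, shared-file, and instance-metadata providers:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/helper/awsutil"
)

func main() {
	cfg := &awsutil.CredentialsConfig{
		Region: "us-east-1", // placeholder region
	}

	creds, err := cfg.GenerateCredentialChain()
	if err != nil {
		log.Fatal(err)
	}

	// Get walks the providers in order and returns the first set of
	// credentials that resolves.
	val, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("resolved provider:", val.ProviderName)
}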
diff --git a/vendor/github.com/hashicorp/vault/helper/certutil/certutil_test.go b/vendor/github.com/hashicorp/vault/helper/certutil/certutil_test.go
new file mode 100644
index 0000000..06769ff
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/certutil/certutil_test.go
@@ -0,0 +1,666 @@
+package certutil
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/api"
+)
+
+// Tests converting back and forth between a CertBundle and a ParsedCertBundle.
+//
+// Also tests the GetSubjKeyID, GetHexFormatted, and
+// ParsedCertBundle.getSigner functions.
+func TestCertBundleConversion(t *testing.T) {
+ cbuts := []*CertBundle{
+ refreshRSACertBundle(),
+ refreshRSACertBundleWithChain(),
+ refreshRSA8CertBundle(),
+ refreshRSA8CertBundleWithChain(),
+ refreshECCertBundle(),
+ refreshECCertBundleWithChain(),
+ refreshEC8CertBundle(),
+ refreshEC8CertBundleWithChain(),
+ }
+
+ for i, cbut := range cbuts {
+ pcbut, err := cbut.ToParsedCertBundle()
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Errorf("Error converting to parsed cert bundle: %s", err)
+ continue
+ }
+
+ err = compareCertBundleToParsedCertBundle(cbut, pcbut)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Errorf(err.Error())
+ }
+
+ cbut, err := pcbut.ToCertBundle()
+ if err != nil {
+ t.Fatalf("Error converting to cert bundle: %s", err)
+ }
+
+ err = compareCertBundleToParsedCertBundle(cbut, pcbut)
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+ }
+}
+
+func BenchmarkCertBundleParsing(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ cbuts := []*CertBundle{
+ refreshRSACertBundle(),
+ refreshRSACertBundleWithChain(),
+ refreshRSA8CertBundle(),
+ refreshRSA8CertBundleWithChain(),
+ refreshECCertBundle(),
+ refreshECCertBundleWithChain(),
+ refreshEC8CertBundle(),
+ refreshEC8CertBundleWithChain(),
+ }
+
+ for i, cbut := range cbuts {
+ pcbut, err := cbut.ToParsedCertBundle()
+ if err != nil {
+ b.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ b.Errorf("Error converting to parsed cert bundle: %s", err)
+ continue
+ }
+
+ cbut, err = pcbut.ToCertBundle()
+ if err != nil {
+ b.Fatalf("Error converting to cert bundle: %s", err)
+ }
+ }
+ }
+}
+
+func TestCertBundleParsing(t *testing.T) {
+ cbuts := []*CertBundle{
+ refreshRSACertBundle(),
+ refreshRSACertBundleWithChain(),
+ refreshRSA8CertBundle(),
+ refreshRSA8CertBundleWithChain(),
+ refreshECCertBundle(),
+ refreshECCertBundleWithChain(),
+ refreshEC8CertBundle(),
+ refreshEC8CertBundleWithChain(),
+ }
+
+ for i, cbut := range cbuts {
+ jsonString, err := json.Marshal(cbut)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf("Error marshaling testing certbundle to JSON: %s", err)
+ }
+ pcbut, err := ParsePKIJSON(jsonString)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf("Error during JSON bundle handling: %s", err)
+ }
+ err = compareCertBundleToParsedCertBundle(cbut, pcbut)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf(err.Error())
+ }
+
+ secret := &api.Secret{
+ Data: structs.New(cbut).Map(),
+ }
+ pcbut, err = ParsePKIMap(secret.Data)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf("Error during JSON bundle handling: %s", err)
+ }
+ err = compareCertBundleToParsedCertBundle(cbut, pcbut)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf(err.Error())
+ }
+
+ pcbut, err = ParsePEMBundle(cbut.ToPEMBundle())
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf("Error during JSON bundle handling: %s", err)
+ }
+ err = compareCertBundleToParsedCertBundle(cbut, pcbut)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf(err.Error())
+ }
+ }
+}
+
+func compareCertBundleToParsedCertBundle(cbut *CertBundle, pcbut *ParsedCertBundle) error {
+ if cbut == nil {
+ return fmt.Errorf("Got nil bundle")
+ }
+ if pcbut == nil {
+ return fmt.Errorf("Got nil parsed bundle")
+ }
+
+ switch {
+ case pcbut.Certificate == nil:
+ return fmt.Errorf("Parsed bundle has nil certificate")
+ case pcbut.PrivateKey == nil:
+ return fmt.Errorf("Parsed bundle has nil private key")
+ }
+
+ switch cbut.PrivateKey {
+ case privRSAKeyPem:
+ if pcbut.PrivateKeyType != RSAPrivateKey {
+ return fmt.Errorf("Parsed bundle has wrong private key type: %v, should be 'rsa' (%v)", pcbut.PrivateKeyType, RSAPrivateKey)
+ }
+ case privRSA8KeyPem:
+ if pcbut.PrivateKeyType != RSAPrivateKey {
+ return fmt.Errorf("Parsed bundle has wrong pkcs8 private key type: %v, should be 'rsa' (%v)", pcbut.PrivateKeyType, RSAPrivateKey)
+ }
+ case privECKeyPem:
+ if pcbut.PrivateKeyType != ECPrivateKey {
+ return fmt.Errorf("Parsed bundle has wrong private key type: %v, should be 'ec' (%v)", pcbut.PrivateKeyType, ECPrivateKey)
+ }
+ case privEC8KeyPem:
+ if pcbut.PrivateKeyType != ECPrivateKey {
+ return fmt.Errorf("Parsed bundle has wrong pkcs8 private key type: %v, should be 'ec' (%v)", pcbut.PrivateKeyType, ECPrivateKey)
+ }
+ default:
+ return fmt.Errorf("Parsed bundle has unknown private key type")
+ }
+
+ subjKeyID, err := GetSubjKeyID(pcbut.PrivateKey)
+ if err != nil {
+ return fmt.Errorf("Error when getting subject key id: %s", err)
+ }
+ if bytes.Compare(subjKeyID, pcbut.Certificate.SubjectKeyId) != 0 {
+ return fmt.Errorf("Parsed bundle private key does not match subject key id")
+ }
+
+ switch {
+ case len(pcbut.CAChain) > 0 && len(cbut.CAChain) == 0:
+ return fmt.Errorf("Parsed bundle ca chain has certs when cert bundle does not")
+ case len(pcbut.CAChain) == 0 && len(cbut.CAChain) > 0:
+ return fmt.Errorf("Cert bundle ca chain has certs when parsed cert bundle does not")
+ }
+
+ cb, err := pcbut.ToCertBundle()
+ if err != nil {
+ return fmt.Errorf("Thrown error during parsed bundle conversion: %s\n\nInput was: %#v", err, *pcbut)
+ }
+
+ switch {
+ case len(cb.Certificate) == 0:
+ return fmt.Errorf("Bundle has nil certificate")
+ case len(cb.PrivateKey) == 0:
+ return fmt.Errorf("Bundle has nil private key")
+ case len(cb.CAChain[0]) == 0:
+ return fmt.Errorf("Bundle has nil issuing CA")
+ }
+
+ switch pcbut.PrivateKeyType {
+ case RSAPrivateKey:
+ if cb.PrivateKey != privRSAKeyPem && cb.PrivateKey != privRSA8KeyPem {
+ return fmt.Errorf("Bundle private key does not match")
+ }
+ case ECPrivateKey:
+ if cb.PrivateKey != privECKeyPem && cb.PrivateKey != privEC8KeyPem {
+ return fmt.Errorf("Bundle private key does not match")
+ }
+ default:
+ return fmt.Errorf("CertBundle has unknown private key type")
+ }
+
+ if cb.SerialNumber != GetHexFormatted(pcbut.Certificate.SerialNumber.Bytes(), ":") {
+ return fmt.Errorf("Bundle serial number does not match")
+ }
+
+ switch {
+ case len(pcbut.CAChain) > 0 && len(cb.CAChain) == 0:
+ return fmt.Errorf("Parsed bundle ca chain has certs when cert bundle does not")
+ case len(pcbut.CAChain) == 0 && len(cb.CAChain) > 0:
+ return fmt.Errorf("Cert bundle ca chain has certs when parsed cert bundle does not")
+ case !reflect.DeepEqual(cbut.CAChain, cb.CAChain):
+ return fmt.Errorf("Cert bundle ca chain does not match: %#v\n\n%#v", cbut.CAChain, cb.CAChain)
+ }
+
+ return nil
+}
+
+func TestCSRBundleConversion(t *testing.T) {
+ csrbuts := []*CSRBundle{
+ refreshRSACSRBundle(),
+ refreshECCSRBundle(),
+ }
+
+ for _, csrbut := range csrbuts {
+ pcsrbut, err := csrbut.ToParsedCSRBundle()
+ if err != nil {
+ t.Fatalf("Error converting to parsed CSR bundle: %v", err)
+ }
+
+ err = compareCSRBundleToParsedCSRBundle(csrbut, pcsrbut)
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+
+ csrbut, err = pcsrbut.ToCSRBundle()
+ if err != nil {
+ t.Fatalf("Error converting to CSR bundle: %v", err)
+ }
+
+ err = compareCSRBundleToParsedCSRBundle(csrbut, pcsrbut)
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+ }
+}
+
+func compareCSRBundleToParsedCSRBundle(csrbut *CSRBundle, pcsrbut *ParsedCSRBundle) error {
+ if csrbut == nil {
+ return fmt.Errorf("Got nil bundle")
+ }
+ if pcsrbut == nil {
+ return fmt.Errorf("Got nil parsed bundle")
+ }
+
+ switch {
+ case pcsrbut.CSR == nil:
+ return fmt.Errorf("Parsed bundle has nil csr")
+ case pcsrbut.PrivateKey == nil:
+ return fmt.Errorf("Parsed bundle has nil private key")
+ }
+
+ switch csrbut.PrivateKey {
+ case privRSAKeyPem:
+ if pcsrbut.PrivateKeyType != RSAPrivateKey {
+ return fmt.Errorf("Parsed bundle has wrong private key type")
+ }
+ case privECKeyPem:
+ if pcsrbut.PrivateKeyType != ECPrivateKey {
+ return fmt.Errorf("Parsed bundle has wrong private key type")
+ }
+ default:
+ return fmt.Errorf("Parsed bundle has unknown private key type")
+ }
+
+ csrb, err := pcsrbut.ToCSRBundle()
+ if err != nil {
+ return fmt.Errorf("Thrown error during parsed bundle conversion: %s\n\nInput was: %#v", err, *pcsrbut)
+ }
+
+ switch {
+ case len(csrb.CSR) == 0:
+ return fmt.Errorf("Bundle has nil certificate")
+ case len(csrb.PrivateKey) == 0:
+ return fmt.Errorf("Bundle has nil private key")
+ }
+
+ switch csrb.PrivateKeyType {
+ case "rsa":
+ if pcsrbut.PrivateKeyType != RSAPrivateKey {
+ return fmt.Errorf("Bundle has wrong private key type")
+ }
+ if csrb.PrivateKey != privRSAKeyPem {
+ return fmt.Errorf("Bundle private key does not match")
+ }
+ case "ec":
+ if pcsrbut.PrivateKeyType != ECPrivateKey {
+ return fmt.Errorf("Bundle has wrong private key type")
+ }
+ if csrb.PrivateKey != privECKeyPem {
+ return fmt.Errorf("Bundle private key does not match")
+ }
+ default:
+ return fmt.Errorf("Bundle has unknown private key type")
+ }
+
+ return nil
+}
+
+func TestTLSConfig(t *testing.T) {
+ cbut := refreshRSACertBundle()
+
+ pcbut, err := cbut.ToParsedCertBundle()
+ if err != nil {
+ t.Fatalf("Error getting parsed cert bundle: %s", err)
+ }
+
+ usages := []TLSUsage{
+ TLSUnknown,
+ TLSClient,
+ TLSServer,
+ TLSClient | TLSServer,
+ }
+
+ for _, usage := range usages {
+ tlsConfig, err := pcbut.GetTLSConfig(usage)
+ if err != nil {
+ t.Fatalf("Error getting tls config: %s", err)
+ }
+ if tlsConfig == nil {
+ t.Fatalf("Got nil tls.Config")
+ }
+
+ if len(tlsConfig.Certificates) != 1 {
+ t.Fatalf("Unexpected length in config.Certificates")
+ }
+
+ // Length should be 2, since we passed in a CA
+ if len(tlsConfig.Certificates[0].Certificate) != 2 {
+ t.Fatalf("Did not find both certificates in config.Certificates.Certificate")
+ }
+
+ if tlsConfig.Certificates[0].Leaf != pcbut.Certificate {
+ t.Fatalf("Leaf certificate does not match parsed bundle's certificate")
+ }
+
+ if tlsConfig.Certificates[0].PrivateKey != pcbut.PrivateKey {
+ t.Fatalf("Config's private key does not match parsed bundle's private key")
+ }
+
+ switch usage {
+ case TLSServer | TLSClient:
+ if len(tlsConfig.ClientCAs.Subjects()) != 1 || bytes.Compare(tlsConfig.ClientCAs.Subjects()[0], pcbut.CAChain[0].Certificate.RawSubject) != 0 {
+ t.Fatalf("CA certificate not in client cert pool as expected")
+ }
+ if len(tlsConfig.RootCAs.Subjects()) != 1 || bytes.Compare(tlsConfig.RootCAs.Subjects()[0], pcbut.CAChain[0].Certificate.RawSubject) != 0 {
+ t.Fatalf("CA certificate not in root cert pool as expected")
+ }
+ case TLSServer:
+ if len(tlsConfig.ClientCAs.Subjects()) != 1 || bytes.Compare(tlsConfig.ClientCAs.Subjects()[0], pcbut.CAChain[0].Certificate.RawSubject) != 0 {
+ t.Fatalf("CA certificate not in client cert pool as expected")
+ }
+ if tlsConfig.RootCAs != nil {
+ t.Fatalf("Found root pools in config object when not expected")
+ }
+ case TLSClient:
+ if len(tlsConfig.RootCAs.Subjects()) != 1 || bytes.Compare(tlsConfig.RootCAs.Subjects()[0], pcbut.CAChain[0].Certificate.RawSubject) != 0 {
+ t.Fatalf("CA certificate not in root cert pool as expected")
+ }
+ if tlsConfig.ClientCAs != nil {
+ t.Fatalf("Found root pools in config object when not expected")
+ }
+ default:
+ if tlsConfig.RootCAs != nil || tlsConfig.ClientCAs != nil {
+ t.Fatalf("Found root pools in config object when not expected")
+ }
+ }
+ }
+}
+
+func refreshRSA8CertBundle() *CertBundle {
+ return &CertBundle{
+ Certificate: certRSAPem,
+ PrivateKey: privRSA8KeyPem,
+ CAChain: []string{issuingCaChainPem[0]},
+ }
+}
+
+func refreshRSA8CertBundleWithChain() *CertBundle {
+ ret := refreshRSA8CertBundle()
+ ret.CAChain = issuingCaChainPem
+ return ret
+}
+
+func refreshRSACertBundle() *CertBundle {
+ return &CertBundle{
+ Certificate: certRSAPem,
+ CAChain: []string{issuingCaChainPem[0]},
+ PrivateKey: privRSAKeyPem,
+ }
+}
+
+func refreshRSACertBundleWithChain() *CertBundle {
+ ret := refreshRSACertBundle()
+ ret.CAChain = issuingCaChainPem
+ return ret
+}
+
+func refreshECCertBundle() *CertBundle {
+ return &CertBundle{
+ Certificate: certECPem,
+ CAChain: []string{issuingCaChainPem[0]},
+ PrivateKey: privECKeyPem,
+ }
+}
+
+func refreshECCertBundleWithChain() *CertBundle {
+ ret := refreshECCertBundle()
+ ret.CAChain = issuingCaChainPem
+ return ret
+}
+
+func refreshRSACSRBundle() *CSRBundle {
+ return &CSRBundle{
+ CSR: csrRSAPem,
+ PrivateKey: privRSAKeyPem,
+ }
+}
+
+func refreshECCSRBundle() *CSRBundle {
+ return &CSRBundle{
+ CSR: csrECPem,
+ PrivateKey: privECKeyPem,
+ }
+}
+
+func refreshEC8CertBundle() *CertBundle {
+ return &CertBundle{
+ Certificate: certECPem,
+ PrivateKey: privEC8KeyPem,
+ CAChain: []string{issuingCaChainPem[0]},
+ }
+}
+
+func refreshEC8CertBundleWithChain() *CertBundle {
+ ret := refreshEC8CertBundle()
+ ret.CAChain = issuingCaChainPem
+ return ret
+}
+
+var (
+ privRSA8KeyPem = `-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC92mr7+D/tGkW5
+nvDH/fYkOLywbxsU9wU7lKVPCdj+zNzQYHiixTZtmZPYVTBj27lZgaUDUXuiw9Ru
+BWHTuAb/Cpn1I+71qJbh8FgWot+MRDFKuV0PLkgHz5eRVC4JmKy9hbcgo1q0FfGf
+qxL+VQmI0GcQ4IYK/ppVMrKbn4ndIg70uR46vPiU11GqRIz5wkiPeLklrhoWa5qE
+IHINnR83eHUbijaCuqPEcz0QTz0iLM8dfubVaJ+Gn/DseUtku+qBdcYcUK2hQyCc
+NRKtu953gr5hhEX0N9x5JBb10WaI1UL5HGa2wa6ndZ7yVb42B2WTHzNQRR5Cr4N4
+ve31//gRAgMBAAECggEAfEvyvTLDz5zix2tK4vTfYMmQp8amKWysjVx9eijNW8yO
+SRLQCGkrgEgLJphnjQk+6V3axjhjxKWHf9ygNrgGRJYRRBCZk1YkKpprYa6Sw0em
+KfD//z9iw1JjPi+p0HiXp6FSytiIOt0fC1U6oy7ThjJDOCZ3O92C94KwsviZjx9r
+DZbTLDm7Ya2LF4jGCq0dQ+AVqZ65QJ3yjdxm87PSE6q2eiV9wdMUx9RDOmFy+Meq
+Mm3L9TW1QzyFtFMXeIF5QYGpmxWP/iii5V0CP573apXMIqQ+wTNpwK3WU5iURypZ
+kJ1Iaxbzjfok6wpwLj7SJytF+fOVcygUxud7GPH8UQKBgQDPhQhB3+o+y+bwkUTx
+Qdj/YNKcA/bjo/b9KMq+3rufwN9u/DK5z7vVfVklidbh5DVqhlLREsdSuZvb/IHc
+OdCYwNeDxk1rLr+1W/iPYSBJod4eWDteIH1U9lts+/mH+u+iSsWVuikbeA8/MUJ3
+nnAYu4FR1nz8I/CrvGbQL/KCdQKBgQDqNNI562Ch+4dJ407F3MC4gNPwPgksfLXn
+ZRcPVVwGagil9oIIte0BIT0rAG/jVACfghGxfrj719uwjcFFxnUaSHGQcATseSf6
+SgoruIVF15lI4e8lEcWrOypsW8Id2/amwUiIWYCgwlYG2Q7dggpXfgjmKfjSlvJ8
++yKR/Y6zrQKBgQCkx2aqICm5mWEUbtWGmJm9Ft3FQqSdV4n8tZJgAy6KiLUiRKHm
+x1vIBtNtqkj1b6c2odhK6ZVaS8XF5XgcLdBEKwQ2P5Uj4agaUyBIgYAI174u7DKf
+6D58423vWRln70qu3J6N6JdRl4DL1cqIf0dVbDYgjKcL82HcjCo7b4cqLQKBgFGU
+TJX4MxS5NIq8LrglCMw7s5c/RJrGZeZQBBRHO2LQlGqazviRxhhap5/O6ypYHE9z
+Uw5sgarXqaJ5/hR76FZbXZNeMZjdKtu35osMHwAQ9Ue5yz8yTZQza7eKzrbv4556
+PPWhl3hnuOdxvAfUQB3xvM/PVuijw5tdLtGDbK2RAoGBAKB7OsTgF7wVEkzccJTE
+hrbVKD+KBZz8WKnEgNoyyTIT0Kaugk15MCXkGrXIY8bW0IzYAw69qhTOgaWkcu4E
+JbTK5UerP8V+X7XPBiw72StPVM4bxaXx2/B+78IuMOI/GR0tHQCF8S6DwTHeBXnl
+ke8GFExnXHTPqG6Bku0r/G47
+-----END PRIVATE KEY-----`
+
+ privRSAKeyPem = `-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAvdpq+/g/7RpFuZ7wx/32JDi8sG8bFPcFO5SlTwnY/szc0GB4
+osU2bZmT2FUwY9u5WYGlA1F7osPUbgVh07gG/wqZ9SPu9aiW4fBYFqLfjEQxSrld
+Dy5IB8+XkVQuCZisvYW3IKNatBXxn6sS/lUJiNBnEOCGCv6aVTKym5+J3SIO9Lke
+Orz4lNdRqkSM+cJIj3i5Ja4aFmuahCByDZ0fN3h1G4o2grqjxHM9EE89IizPHX7m
+1Wifhp/w7HlLZLvqgXXGHFCtoUMgnDUSrbved4K+YYRF9DfceSQW9dFmiNVC+Rxm
+tsGup3We8lW+Ngdlkx8zUEUeQq+DeL3t9f/4EQIDAQABAoIBAHxL8r0yw8+c4sdr
+SuL032DJkKfGpilsrI1cfXoozVvMjkkS0AhpK4BICyaYZ40JPuld2sY4Y8Slh3/c
+oDa4BkSWEUQQmZNWJCqaa2GuksNHpinw//8/YsNSYz4vqdB4l6ehUsrYiDrdHwtV
+OqMu04YyQzgmdzvdgveCsLL4mY8faw2W0yw5u2GtixeIxgqtHUPgFameuUCd8o3c
+ZvOz0hOqtnolfcHTFMfUQzphcvjHqjJty/U1tUM8hbRTF3iBeUGBqZsVj/4oouVd
+Aj+e92qVzCKkPsEzacCt1lOYlEcqWZCdSGsW8436JOsKcC4+0icrRfnzlXMoFMbn
+exjx/FECgYEAz4UIQd/qPsvm8JFE8UHY/2DSnAP246P2/SjKvt67n8Dfbvwyuc+7
+1X1ZJYnW4eQ1aoZS0RLHUrmb2/yB3DnQmMDXg8ZNay6/tVv4j2EgSaHeHlg7XiB9
+VPZbbPv5h/rvokrFlbopG3gPPzFCd55wGLuBUdZ8/CPwq7xm0C/ygnUCgYEA6jTS
+OetgofuHSeNOxdzAuIDT8D4JLHy152UXD1VcBmoIpfaCCLXtASE9KwBv41QAn4IR
+sX64+9fbsI3BRcZ1GkhxkHAE7Hkn+koKK7iFRdeZSOHvJRHFqzsqbFvCHdv2psFI
+iFmAoMJWBtkO3YIKV34I5in40pbyfPsikf2Os60CgYEApMdmqiApuZlhFG7VhpiZ
+vRbdxUKknVeJ/LWSYAMuioi1IkSh5sdbyAbTbapI9W+nNqHYSumVWkvFxeV4HC3Q
+RCsENj+VI+GoGlMgSIGACNe+Luwyn+g+fONt71kZZ+9KrtyejeiXUZeAy9XKiH9H
+VWw2IIynC/Nh3IwqO2+HKi0CgYBRlEyV+DMUuTSKvC64JQjMO7OXP0SaxmXmUAQU
+Rzti0JRqms74kcYYWqefzusqWBxPc1MObIGq16mief4Ue+hWW12TXjGY3Srbt+aL
+DB8AEPVHucs/Mk2UM2u3is627+Oeejz1oZd4Z7jncbwH1EAd8bzPz1boo8ObXS7R
+g2ytkQKBgQCgezrE4Be8FRJM3HCUxIa21Sg/igWc/FipxIDaMskyE9CmroJNeTAl
+5Bq1yGPG1tCM2AMOvaoUzoGlpHLuBCW0yuVHqz/Ffl+1zwYsO9krT1TOG8Wl8dvw
+fu/CLjDiPxkdLR0AhfEug8Ex3gV55ZHvBhRMZ1x0z6hugZLtK/xuOw==
+-----END RSA PRIVATE KEY-----`
+
+ csrRSAPem = `-----BEGIN CERTIFICATE REQUEST-----
+MIICijCCAXICAQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx
+ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBALd2SVGs7QkYlYPOz9MvE+0DDRUIutQQkiCnqcV1
+1nO84uSSUjH0ALLVyRKgWIkobEqWJurKIk+oV9O+Le8EABxkt/ru6jaIFqZkwFE1
+JzSDPkAsR9jXP5M2MWlmo7qVc4Bry3oqlN3eLSFJP2ZH7b1ia8q9oWXPMxDdwuKR
+kv9hfkUPszr/gaQCNQXW0BoRe5vdr5+ikv4lrKpsBxvaAoYL1ngR41lCPKhmrgXP
+oreEuUcXzfCSSAV1CGbW2qhWc4I7/JFA8qqEBr5GP+AntStGxSbDO8JjD7/uULHC
+AReCloDdJE0jDz1355/0CAH4WmEJE/TI8Bq+vgd0jgzRgk0CAwEAAaAAMA0GCSqG
+SIb3DQEBCwUAA4IBAQAR8U1vZMJf7YFvGU69QvoWPTDe/o8SwYy1j+++AAO9Y7H2
+C7nb+9tnEMtXm+3pkY0aJIecAnq8H4QWimOrJa/ZsoZLzz9LKW2nzARdWo63j4nB
+jKld/EDBzQ/nQSTyoX7s9JiDiSC9yqTXBrPHSXruPbh7sE0yXROar+6atjNdCpDp
+uLw86gwewDJrMaB1aFAmDvwaRQQDONwRy0zG1UdMxLQxsxpKOHaGM/ZvV3FPir2B
+7mKupki/dvap5UW0lTMJBlKf3qhoeHKMHFo9i5vGCIkWUIv+XgTF0NjbYv9i7bfq
+WdW905v4wiuWRlddNwqFtLx9Pf1/fRJVT5mBbjIx
+-----END CERTIFICATE REQUEST-----`
+
+ certRSAPem = `-----BEGIN CERTIFICATE-----
+MIIDfDCCAmSgAwIBAgIUad4Q9EhVvqc06H7fCfKaLGcyDw0wDQYJKoZIhvcNAQEL
+BQAwNzE1MDMGA1UEAxMsVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3ViIFN1
+YiBBdXRob3JpdHkwHhcNMTYwODA0MTkyMjAyWhcNMTYwODA0MjAyMjMyWjAhMR8w
+HQYDVQQDExZWYXVsdCBUZXN0IENlcnRpZmljYXRlMIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAvdpq+/g/7RpFuZ7wx/32JDi8sG8bFPcFO5SlTwnY/szc
+0GB4osU2bZmT2FUwY9u5WYGlA1F7osPUbgVh07gG/wqZ9SPu9aiW4fBYFqLfjEQx
+SrldDy5IB8+XkVQuCZisvYW3IKNatBXxn6sS/lUJiNBnEOCGCv6aVTKym5+J3SIO
+9LkeOrz4lNdRqkSM+cJIj3i5Ja4aFmuahCByDZ0fN3h1G4o2grqjxHM9EE89IizP
+HX7m1Wifhp/w7HlLZLvqgXXGHFCtoUMgnDUSrbved4K+YYRF9DfceSQW9dFmiNVC
++RxmtsGup3We8lW+Ngdlkx8zUEUeQq+DeL3t9f/4EQIDAQABo4GVMIGSMA4GA1Ud
+DwEB/wQEAwIDqDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0O
+BBYEFMKLVTrdDyyF0kTxkxcAMcGNcGWlMB8GA1UdIwQYMBaAFNmGqFL215GlYyD0
+mWVIoMB71s+NMCEGA1UdEQQaMBiCFlZhdWx0IFRlc3QgQ2VydGlmaWNhdGUwDQYJ
+KoZIhvcNAQELBQADggEBAJJP9OWG3W5uUluKdeFYCzKMIY+rsCUb86QrKRqQ5xYR
+w4pKC3yuryEfreBs3iQA4NNw2mMWxuI8t/i+km2H7NzQytTRn6L0sxTa8ThNZ3e7
+xCdWaZZzd1O6Xwq/pDbE1MZ/4z5nvsKaKJVVIvVFL5algi4A8njiFMVSww035c1e
+waLww4AOHydlLky/RJBJPOkQNoDBToC9ojDqPtNJVWWaQL2TsUCu+Q+L5QL5djgj
+LxPwqGOiM4SLSUrXSXMpHNLX1rhBH1/sNb3Kn1FDBaZ+M9kZglCDwuQyQuH8xKwB
+qukeKfgFUp7rH0yoQTZa0eaXAYTFoRLjnTQ+fS7e19s=
+-----END CERTIFICATE-----`
+
+ privECKeyPem = `-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEICC2XihYLxEYEseFesZEXjV1va6rMAdtkpkaxT4hGu5boAoGCCqGSM49
+AwEHoUQDQgAEti0uWkq7MAkQevNNrBpYY0FLni8OAZroHXkij2x6Vo0xIvClftbC
+L33BU/520t23TcewtQYsNqv86Bvhx9PeAw==
+-----END EC PRIVATE KEY-----`
+
+ csrECPem = `-----BEGIN CERTIFICATE REQUEST-----
+MIHsMIGcAgEAMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
+HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwTjAQBgcqhkjOPQIBBgUr
+gQQAIQM6AATBZ3VXwBE9oeSREpM5b25PW6WiuLb4EXWpKZyjj552QYKYe7QBuGe9
+wvvgOeCBovN3tSuGKzTiUKAAMAoGCCqGSM49BAMCAz8AMDwCHFap/5XDuqtXCG1g
+ljbYH5OWGBqGYCfL2k2+/6cCHAuk1bmOkGx7JAq/fSPd09i0DQIqUu7WHQHms48=
+-----END CERTIFICATE REQUEST-----`
+
+ privEC8KeyPem = `-----BEGIN PRIVATE KEY-----
+MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgILZeKFgvERgSx4V6
+xkReNXW9rqswB22SmRrFPiEa7luhRANCAAS2LS5aSrswCRB6802sGlhjQUueLw4B
+mugdeSKPbHpWjTEi8KV+1sIvfcFT/nbS3bdNx7C1Biw2q/zoG+HH094D
+-----END PRIVATE KEY-----`
+
+ certECPem = `-----BEGIN CERTIFICATE-----
+MIICtzCCAZ+gAwIBAgIUNDYMWd9SOGVMs4I1hezvRnGDMyUwDQYJKoZIhvcNAQEL
+BQAwNzE1MDMGA1UEAxMsVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3ViIFN1
+YiBBdXRob3JpdHkwHhcNMTYwODA0MTkyOTM0WhcNMTYwODA0MjAzMDA0WjAkMSIw
+IAYDVQQDExlWYXVsdCBUZXN0IEVDIENlcnRpZmljYXRlMFkwEwYHKoZIzj0CAQYI
+KoZIzj0DAQcDQgAEti0uWkq7MAkQevNNrBpYY0FLni8OAZroHXkij2x6Vo0xIvCl
+ftbCL33BU/520t23TcewtQYsNqv86Bvhx9PeA6OBmDCBlTAOBgNVHQ8BAf8EBAMC
+A6gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBStnbW/
+ga2/dz4FyRNafwhTzM1UbzAfBgNVHSMEGDAWgBTZhqhS9teRpWMg9JllSKDAe9bP
+jTAkBgNVHREEHTAbghlWYXVsdCBUZXN0IEVDIENlcnRpZmljYXRlMA0GCSqGSIb3
+DQEBCwUAA4IBAQBsPhwRB51de3sGBMnjDiOMViYpRH7kKhUWAY1W2W/1hqk5HgZw
+4c3r0LmdIQ94gShaXng8ojYRDW/5D7LeXJdbtLy9U29xfeCb+vqKDc2oN7Ps3/HB
+4YLnseqDiZFKPEAdOE4rtwyFWJI7JR9sOSG1B5El6duN0i9FWOLSklQ4EbV5R45r
+cy/fJq0DOYje7MXsFuNl5iQ92gfDjPD2P98DK9lCIquSzB3WkpjE41UtKJ0IKPeD
+wYoyl0J33Alxq2eC2subR7xISR3MzZFcdkzNNrBddeaSviYlR4SgTUiqOldAcdR4
+QZxtxazcUqQDZ+wZFOpBOnp94bzVeXT9BF+L
+-----END CERTIFICATE-----`
+
+ issuingCaChainPem = []string{`-----BEGIN CERTIFICATE-----
+MIIDljCCAn6gAwIBAgIUHjciEzUzeNVqI9mwFJeduNtXWzMwDQYJKoZIhvcNAQEL
+BQAwMzExMC8GA1UEAxMoVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3ViIEF1
+dGhvcml0eTAeFw0xNjA4MDQxOTEyNTdaFw0xNjA4MDUyMDEzMjdaMDcxNTAzBgNV
+BAMTLFZhdWx0IFRlc3RpbmcgSW50ZXJtZWRpYXRlIFN1YiBTdWIgQXV0aG9yaXR5
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy2pAH1U8KzhjO+MLRPTb
+ic7Iyk57d0TFnj6CAJWqZaKNGXoTkwD8wRCirY8mQv8YrfBy3hwGqSLYj6oxwA0R
+8FxsiWdf4gFTX2cJpxThFnIllGbzqIXnEZLvCIMydp44Ls9eYxoXfZQ9X24u/Wmf
+kWEQFGUzrpyklkIOx2Yo5g7OHbFLl3OfPz89/TDM8VeymlGzCTJZ+Y+iNGDBPT0L
+X9aE65lL76dUx/bcKnfQEgAcH4nkE4K/Kgjnj5umZKQUH4+6wKFwDCQT2RwaBkve
+WyAiz0LY9a1WFXt7RYCPs+QWLJAhv7wJL8l4gnxYA1k+ovLXDjUqYweU+WHV6/lR
+7wIDAQABo4GdMIGaMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
+A1UdDgQWBBTZhqhS9teRpWMg9JllSKDAe9bPjTAfBgNVHSMEGDAWgBRTY6430DXg
+cIDAnEnA+fostjcbqDA3BgNVHREEMDAugixWYXVsdCBUZXN0aW5nIEludGVybWVk
+aWF0ZSBTdWIgU3ViIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAQEAZp3VwrUw
+jw6TzjZJEnXkfRxLWZNmqMPAXYOtBl/+5FjAfehifTTzIdIxR4mfdgH5YZnSQpzY
+m/w17BXElao8uOX6CUaX+sLTVzwsl2csswpcGlNwHooVREoMq9X187qxSr1HS7zF
+O550XgDVIf5e7sXrVuV1rd1XUo3xZLaSLUhU70y/343mcN2TRUslXO4QrIE5lo2v
+awyQl0NW0hSO0F9VZYzOvPPVwu7mf1ijTzbkPtUbAXDnmlvOCrlx2JZd/BqXb75e
+UgYDq7hIyQ109FBOjv0weAM5tZCdesyvro4/43Krd8pa74zHdZMjfQAsTr66WOi4
+yedj8LnWl66JOA==
+-----END CERTIFICATE-----`,
+ `-----BEGIN CERTIFICATE-----
+MIIDijCCAnKgAwIBAgIUBNDYCUsOT2Wth8Fz3layfjEVbcIwDQYJKoZIhvcNAQEL
+BQAwLzEtMCsGA1UEAxMkVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgQXV0aG9y
+aXR5MB4XDTE2MDgwNDE5MTI1NloXDTE2MDgwNjIxMTMyNlowMzExMC8GA1UEAxMo
+VmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3ViIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALHoD7g5YYu2akO8hkFlUCF45Bxjckq4
+WTyDIcDwv/wr7vhZnngCClnP+7Rc30XTmkq2RnH6N7iuqowGM5RNcBV/C9R1weVx
+9esXtWr/AUMyuNb3HSjwDwQGuiAVEgk67fXYy08Ii78+ap3uY3CKC1AFDkHdgDZt
+e946rJ3Nps00TcH0KwyP5voitLgt6dMBR9ttuUdSoQ4uLQDdDf0HRw/IAQswO4Av
+lgUgQObBecnLGhh7e3PM5VVz5f0IqG2ZYnDs3ncl2UYOrj0/JqOMDIMvSQMc2pzS
+Hjty0d1wKWWPC9waguL/24oQR4VG5b7TL62elc2kcEg7r8u5L/sCi/8CAwEAAaOB
+mTCBljAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+U2OuN9A14HCAwJxJwPn6LLY3G6gwHwYDVR0jBBgwFoAUgAz80p6Pkzk6Cb7lYmTI
+T1jc7iYwMwYDVR0RBCwwKoIoVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3Vi
+IEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAQEACXjzGVqRZi6suHwBqvXwVxlS
+nf5YwudfBDJ4LfNd5nTCypsvHkfXnaki6LZMCS1rwPvxzssZ5Wp/7zO5fu6lpSTx
+yjuiH5fBUGHy+f1Ygu6tlAZtUnxAi6pU4eoCDNZpqunJMM4IdaahHeICdjPhx/bH
+AlmwaN0FsNvOlgUuPTjQ3z6jMZn3p2lXI3HiRlcz+nR7gQizPb2L7u8mQ+5EZFmC
+AmXMj40g3bTJVmKoGeAR7cb0pYG/GUELmERjEjCfP7W15eYfuu1j7EYTUAVuPAlJ
+34HDxCuM8cPJwCGMDKfb3Q39AYRmLT6sE3/sq2CZ5xlj8wfwDpVfpXikRDpI0A==
+-----END CERTIFICATE-----`,
+ `-----BEGIN CERTIFICATE-----
+MIIDejCCAmKgAwIBAgIUEtjlbdzIth3U71TELA0PVW7HvaEwDQYJKoZIhvcNAQEL
+BQAwJzElMCMGA1UEAxMcVmF1bHQgVGVzdGluZyBSb290IEF1dGhvcml0eTAeFw0x
+NjA4MDQxOTEyNTVaFw0xNjA4MDgyMzEzMjVaMC8xLTArBgNVBAMTJFZhdWx0IFRl
+c3RpbmcgSW50ZXJtZWRpYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAMYAQAHCm9V9062NF/UuAa6z6aYqsS5g2YGkd9DvgYxfU5JI
+yIdSz7rkp9QprlQYl2abptZocq+1C9yRVmRJWKjZYDckSwXdmQam/sOfNuiw6Gbd
+3OJGdQ82jhx3v3mIQp+3u9E43wXX0StaJ44+9DgkgwG8iybiv4fh0LzuHPSeKsXe
+/IvJZ0YAInWuzFNegYxU32UT2CEvLtZdru8+sLr4NFWRu/nYIMPJDeZ2JEQVi9IF
+lcB3dP63c6vMBrn4Wn2xBo12JPsQp+ezf5Z5zmtAe68PwRmIXZVAUa2q+CfEuJ36
+66756Ypa0Z3brhPWfX2ahhxSg8DjqFGmZZ5Gfl8CAwEAAaOBlTCBkjAOBgNVHQ8B
+Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUgAz80p6Pkzk6Cb7l
+YmTIT1jc7iYwHwYDVR0jBBgwFoAU6dC1U32HZp7iq97KSu2i+g8+rf4wLwYDVR0R
+BCgwJoIkVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgQXV0aG9yaXR5MA0GCSqG
+SIb3DQEBCwUAA4IBAQA6xVMyuZgpJhIQnG2LwwD5Zcbmm4+rHkGVNSpwkUH8ga8X
+b4Owog+MvBw8R7ADVwAh/aOh1/qsRfHv8KkMWW+SAQ84ICVXJiPBzEUJaMWujpyr
+SDkbaD05avRtfvSrPCagaUGVRt+wK24g8hpJqQ+trkufzjq9ySU018+NNX9yGRyA
+VjwZAqALlNEAkdcvd4adEBpZqum2x1Fl9EXnjp6NEWQ7nuGkp3X2DP4gDtQPxgmn
+omOo4GHhO0U57exEIl0d4kiy9WU0qcIISOr6I+gzesMooX6aI43CaqJoZKsHXYY6
+1uxFLss+/wDtvIcyXdTdjPrgD38YIgk1/iKNIgKO
+-----END CERTIFICATE-----`}
+)
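
Outside of tests, the typical flow these fixtures exercise is PEM in, tls.Config out; a minimal sketch with a placeholder bundle:

package main

import (
	"log"

	"github.com/hashicorp/vault/helper/certutil"
)

func main() {
	// Placeholder: concatenated PEM blocks (private key, leaf
	// certificate, issuing CA), like the fixtures above.
	pemBundle := "<PEM bundle>"

	parsed, err := certutil.ParsePEMBundle(pemBundle)
	if err != nil {
		log.Fatal(err)
	}

	// Build a *tls.Config usable for both client and server auth.
	tlsConfig, err := parsed.GetTLSConfig(certutil.TLSClient | certutil.TLSServer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("configured %d certificate(s)", len(tlsConfig.Certificates))
}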
diff --git a/vendor/github.com/hashicorp/vault/helper/cidrutil/cidr.go b/vendor/github.com/hashicorp/vault/helper/cidrutil/cidr.go
new file mode 100644
index 0000000..8031bb8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/cidrutil/cidr.go
@@ -0,0 +1,216 @@
+package cidrutil
+
+import (
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/strutil"
+)
+
+// IPBelongsToCIDR checks if the given IP is encompassed by the given CIDR block
+func IPBelongsToCIDR(ipAddr string, cidr string) (bool, error) {
+ if ipAddr == "" {
+ return false, fmt.Errorf("missing IP address")
+ }
+
+ ip := net.ParseIP(ipAddr)
+ if ip == nil {
+ return false, fmt.Errorf("invalid IP address")
+ }
+
+ _, ipnet, err := net.ParseCIDR(cidr)
+ if err != nil {
+ return false, err
+ }
+
+ if !ipnet.Contains(ip) {
+ return false, nil
+ }
+
+ return true, nil
+}
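+
+// An illustrative call (an editorial sketch, not part of the upstream
+// source): checking a host address against a /16 network.
+//
+//	belongs, err := IPBelongsToCIDR("192.168.25.30", "192.168.0.0/16")
+//	// belongs == true, err == nil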
+
+// IPBelongsToCIDRBlocksString checks if the given IP is encompassed by any of
+// the given CIDR blocks, where the input is a single string composed of CIDR
+// blocks joined by a separator. The input is split on the given separator and
+// the IP is checked against each resulting CIDR block.
+func IPBelongsToCIDRBlocksString(ipAddr string, cidrList, separator string) (bool, error) {
+ if ipAddr == "" {
+ return false, fmt.Errorf("missing IP address")
+ }
+
+ if cidrList == "" {
+ return false, fmt.Errorf("missing CIDR list")
+ }
+
+ if separator == "" {
+ return false, fmt.Errorf("missing separator")
+ }
+
+ if ip := net.ParseIP(ipAddr); ip == nil {
+ return false, fmt.Errorf("invalid IP address")
+ }
+
+ return IPBelongsToCIDRBlocksSlice(ipAddr, strutil.ParseDedupLowercaseAndSortStrings(cidrList, separator))
+}
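+
+// An illustrative call (an editorial sketch, not part of the upstream
+// source), using the comma-separated form exercised by the package tests:
+//
+//	belongs, err := IPBelongsToCIDRBlocksString(
+//		"192.168.27.29", "172.169.100.200/18,192.168.0.0/16,10.10.20.20/24", ",")
+//	// belongs == true, err == nil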
+
+// IPBelongsToCIDRBlocksSlice checks if the given IP is encompassed by any of the given
+// CIDR blocks
+func IPBelongsToCIDRBlocksSlice(ipAddr string, cidrs []string) (bool, error) {
+ if ipAddr == "" {
+ return false, fmt.Errorf("missing IP address")
+ }
+
+ if len(cidrs) == 0 {
+ return false, fmt.Errorf("missing CIDR blocks to be checked against")
+ }
+
+ if ip := net.ParseIP(ipAddr); ip == nil {
+ return false, fmt.Errorf("invalid IP address")
+ }
+
+ for _, cidr := range cidrs {
+ belongs, err := IPBelongsToCIDR(ipAddr, cidr)
+ if err != nil {
+ return false, err
+ }
+ if belongs {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
+
+// ValidateCIDRListString checks if each CIDR block in the list is valid, given
+// that the input is a single string composed of CIDR blocks joined by a
+// separator. The input is split on the given separator and the validity of
+// each block is checked.
+func ValidateCIDRListString(cidrList string, separator string) (bool, error) {
+ if cidrList == "" {
+ return false, fmt.Errorf("missing CIDR list that needs validation")
+ }
+ if separator == "" {
+ return false, fmt.Errorf("missing separator")
+ }
+
+ return ValidateCIDRListSlice(strutil.ParseDedupLowercaseAndSortStrings(cidrList, separator))
+}
+
+// ValidateCIDRListSlice checks if each CIDR block in the given list is valid
+func ValidateCIDRListSlice(cidrBlocks []string) (bool, error) {
+ if len(cidrBlocks) == 0 {
+ return false, fmt.Errorf("missing CIDR blocks that needs validation")
+ }
+
+ for _, block := range cidrBlocks {
+ if _, _, err := net.ParseCIDR(strings.TrimSpace(block)); err != nil {
+ return false, err
+ }
+ }
+
+ return true, nil
+}
+
+// Subset checks if the IPs belonging to the second CIDR block (cidr2) form a
+// subset of the IPs belonging to the first CIDR block (cidr1).
+func Subset(cidr1, cidr2 string) (bool, error) {
+ if cidr1 == "" {
+ return false, fmt.Errorf("missing CIDR to be checked against")
+ }
+
+ if cidr2 == "" {
+ return false, fmt.Errorf("missing CIDR that needs to be checked")
+ }
+
+ ip1, net1, err := net.ParseCIDR(cidr1)
+ if err != nil {
+ return false, fmt.Errorf("failed to parse the CIDR to be checked against: %q", err)
+ }
+
+ zeroAddr := false
+ if ip := ip1.To4(); ip != nil && ip.Equal(net.IPv4zero) {
+ zeroAddr = true
+ }
+ if ip := ip1.To16(); ip != nil && ip.Equal(net.IPv6zero) {
+ zeroAddr = true
+ }
+
+ maskLen1, _ := net1.Mask.Size()
+ if !zeroAddr && maskLen1 == 0 {
+ return false, fmt.Errorf("CIDR to be checked against is not in its canonical form")
+ }
+
+ ip2, net2, err := net.ParseCIDR(cidr2)
+ if err != nil {
+ return false, fmt.Errorf("failed to parse the CIDR that needs to be checked: %q", err)
+ }
+
+ zeroAddr = false
+ if ip := ip2.To4(); ip != nil && ip.Equal(net.IPv4zero) {
+ zeroAddr = true
+ }
+ if ip := ip2.To16(); ip != nil && ip.Equal(net.IPv6zero) {
+ zeroAddr = true
+ }
+
+ maskLen2, _ := net2.Mask.Size()
+ if !zeroAddr && maskLen2 == 0 {
+ return false, fmt.Errorf("CIDR that needs to be checked is not in its canonical form")
+ }
+
+ // If the mask length of the CIDR that needs to be checked is smaller
+	// than the mask length of the CIDR to be checked against, then the
+ // former will encompass more IPs than the latter, and hence can't be a
+ // subset of the latter.
+ if maskLen2 < maskLen1 {
+ return false, nil
+ }
+
+ belongs, err := IPBelongsToCIDR(net2.IP.String(), cidr1)
+ if err != nil {
+ return false, err
+ }
+
+ return belongs, nil
+}
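+
+// Argument order matters here; an illustrative sketch (editorial, not part
+// of the upstream source):
+//
+//	ok, _ := Subset("192.168.0.0/16", "192.168.27.0/24") // ok == true
+//	ok, _ = Subset("192.168.27.0/24", "192.168.0.0/16")  // ok == false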
+
+// SubsetBlocks checks if each CIDR block in a given set of CIDR blocks is a
+// subset of at least one CIDR block belonging to another set of CIDR blocks.
+// The first parameter is the set of CIDR blocks to check against and the
+// second parameter is the set of CIDR blocks that need to be checked.
+func SubsetBlocks(cidrBlocks1, cidrBlocks2 []string) (bool, error) {
+ if len(cidrBlocks1) == 0 {
+ return false, fmt.Errorf("missing CIDR blocks to be checked against")
+ }
+
+ if len(cidrBlocks2) == 0 {
+ return false, fmt.Errorf("missing CIDR blocks that needs to be checked")
+ }
+
+	// Check if each element of cidrBlocks2 is a subset of at least one
+	// element of cidrBlocks1
+ for _, cidrBlock2 := range cidrBlocks2 {
+ isSubset := false
+ for _, cidrBlock1 := range cidrBlocks1 {
+ subset, err := Subset(cidrBlock1, cidrBlock2)
+ if err != nil {
+ return false, err
+ }
+			// If the CIDR is a subset of any of the CIDR blocks,
+			// that's good enough. Break out.
+ if subset {
+ isSubset = true
+ break
+ }
+ }
+ // CIDR block was not a subset of any of the CIDR blocks in the
+ // set of blocks to check against
+ if !isSubset {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
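+
+// An illustrative call (an editorial sketch, not part of the upstream
+// source): every block in the second set must fit inside some block of the
+// first set.
+//
+//	ok, _ := SubsetBlocks(
+//		[]string{"192.168.0.0/16", "10.20.30.40/30"},
+//		[]string{"192.168.16.0/20", "10.20.30.40/32"})
+//	// ok == true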
diff --git a/vendor/github.com/hashicorp/vault/helper/cidrutil/cidr_test.go b/vendor/github.com/hashicorp/vault/helper/cidrutil/cidr_test.go
new file mode 100644
index 0000000..f6d5849
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/cidrutil/cidr_test.go
@@ -0,0 +1,240 @@
+package cidrutil
+
+import "testing"
+
+func TestCIDRUtil_IPBelongsToCIDR(t *testing.T) {
+ ip := "192.168.25.30"
+ cidr := "192.168.26.30/16"
+
+ belongs, err := IPBelongsToCIDR(ip, cidr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !belongs {
+ t.Fatalf("expected IP %q to belong to CIDR %q", ip, cidr)
+ }
+
+ ip = "10.197.192.6"
+ cidr = "10.197.192.0/18"
+ belongs, err = IPBelongsToCIDR(ip, cidr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !belongs {
+ t.Fatalf("expected IP %q to belong to CIDR %q", ip, cidr)
+ }
+
+ ip = "192.168.25.30"
+ cidr = "192.168.26.30/24"
+ belongs, err = IPBelongsToCIDR(ip, cidr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if belongs {
+ t.Fatalf("expected IP %q to not belong to CIDR %q", ip, cidr)
+ }
+
+ ip = "192.168.25.30.100"
+ cidr = "192.168.26.30/24"
+ belongs, err = IPBelongsToCIDR(ip, cidr)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+}
+
+func TestCIDRUtil_IPBelongsToCIDRBlocksString(t *testing.T) {
+ ip := "192.168.27.29"
+ cidrList := "172.169.100.200/18,192.168.0.0/16,10.10.20.20/24"
+
+ belongs, err := IPBelongsToCIDRBlocksString(ip, cidrList, ",")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !belongs {
+ t.Fatalf("expected IP %q to belong to one of the CIDRs in %q", ip, cidrList)
+ }
+
+ ip = "10.197.192.6"
+ cidrList = "1.2.3.0/8,10.197.192.0/18,10.197.193.0/24"
+
+ belongs, err = IPBelongsToCIDRBlocksString(ip, cidrList, ",")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !belongs {
+ t.Fatalf("expected IP %q to belong to one of the CIDRs in %q", ip, cidrList)
+ }
+
+ ip = "192.168.27.29"
+ cidrList = "172.169.100.200/18,192.168.0.0.0/16,10.10.20.20/24"
+
+ belongs, err = IPBelongsToCIDRBlocksString(ip, cidrList, ",")
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+
+ ip = "30.40.50.60"
+ cidrList = "172.169.100.200/18,192.168.0.0/16,10.10.20.20/24"
+
+ belongs, err = IPBelongsToCIDRBlocksString(ip, cidrList, ",")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if belongs {
+ t.Fatalf("expected IP %q to not belong to one of the CIDRs in %q", ip, cidrList)
+ }
+
+}
+
+func TestCIDRUtil_IPBelongsToCIDRBlocksSlice(t *testing.T) {
+ ip := "192.168.27.29"
+ cidrList := []string{"172.169.100.200/18", "192.168.0.0/16", "10.10.20.20/24"}
+
+ belongs, err := IPBelongsToCIDRBlocksSlice(ip, cidrList)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !belongs {
+ t.Fatalf("expected IP %q to belong to one of the CIDRs in %q", ip, cidrList)
+ }
+
+ ip = "192.168.27.29"
+ cidrList = []string{"172.169.100.200/18", "192.168.0.0.0/16", "10.10.20.20/24"}
+
+ belongs, err = IPBelongsToCIDRBlocksSlice(ip, cidrList)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+
+ ip = "30.40.50.60"
+ cidrList = []string{"172.169.100.200/18", "192.168.0.0/16", "10.10.20.20/24"}
+
+ belongs, err = IPBelongsToCIDRBlocksSlice(ip, cidrList)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if belongs {
+ t.Fatalf("expected IP %q to not belong to one of the CIDRs in %q", ip, cidrList)
+ }
+}
+
+func TestCIDRUtil_ValidateCIDRListString(t *testing.T) {
+ cidrList := "172.169.100.200/18,192.168.0.0/16,10.10.20.20/24"
+
+ valid, err := ValidateCIDRListString(cidrList, ",")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !valid {
+ t.Fatalf("expected CIDR list %q to be valid", cidrList)
+ }
+
+ cidrList = "172.169.100.200,192.168.0.0/16,10.10.20.20/24"
+ valid, err = ValidateCIDRListString(cidrList, ",")
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+
+ cidrList = "172.169.100.200/18,192.168.0.0.0/16,10.10.20.20/24"
+ valid, err = ValidateCIDRListString(cidrList, ",")
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+}
+
+func TestCIDRUtil_ValidateCIDRListSlice(t *testing.T) {
+ cidrList := []string{"172.169.100.200/18", "192.168.0.0/16", "10.10.20.20/24"}
+
+ valid, err := ValidateCIDRListSlice(cidrList)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !valid {
+ t.Fatalf("expected CIDR list %q to be valid", cidrList)
+ }
+
+ cidrList = []string{"172.169.100.200", "192.168.0.0/16", "10.10.20.20/24"}
+ valid, err = ValidateCIDRListSlice(cidrList)
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+
+ cidrList = []string{"172.169.100.200/18", "192.168.0.0.0/16", "10.10.20.20/24"}
+ valid, err = ValidateCIDRListSlice(cidrList)
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+}
+
+func TestCIDRUtil_Subset(t *testing.T) {
+ cidr1 := "192.168.27.29/24"
+ cidr2 := "192.168.27.29/24"
+ subset, err := Subset(cidr1, cidr2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !subset {
+ t.Fatalf("expected CIDR %q to be a subset of CIDR %q", cidr2, cidr1)
+ }
+
+ cidr1 = "192.168.27.29/16"
+ cidr2 = "192.168.27.29/24"
+ subset, err = Subset(cidr1, cidr2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !subset {
+ t.Fatalf("expected CIDR %q to be a subset of CIDR %q", cidr2, cidr1)
+ }
+
+ cidr1 = "192.168.27.29/24"
+ cidr2 = "192.168.27.29/16"
+ subset, err = Subset(cidr1, cidr2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if subset {
+ t.Fatalf("expected CIDR %q to not be a subset of CIDR %q", cidr2, cidr1)
+ }
+
+ cidr1 = "192.168.0.128/25"
+ cidr2 = "192.168.0.0/24"
+ subset, err = Subset(cidr1, cidr2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if subset {
+ t.Fatalf("expected CIDR %q to not be a subset of CIDR %q", cidr2, cidr1)
+ }
+ subset, err = Subset(cidr2, cidr1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !subset {
+ t.Fatalf("expected CIDR %q to be a subset of CIDR %q", cidr1, cidr2)
+ }
+}
+
+func TestCIDRUtil_SubsetBlocks(t *testing.T) {
+ cidrBlocks1 := []string{"192.168.27.29/16", "172.245.30.40/24", "10.20.30.40/30"}
+ cidrBlocks2 := []string{"192.168.27.29/20", "172.245.30.40/25", "10.20.30.40/32"}
+
+ subset, err := SubsetBlocks(cidrBlocks1, cidrBlocks2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !subset {
+ t.Fatalf("expected CIDR blocks %q to be a subset of CIDR blocks %q", cidrBlocks2, cidrBlocks1)
+ }
+
+ cidrBlocks1 = []string{"192.168.27.29/16", "172.245.30.40/25", "10.20.30.40/30"}
+ cidrBlocks2 = []string{"192.168.27.29/20", "172.245.30.40/24", "10.20.30.40/32"}
+
+ subset, err = SubsetBlocks(cidrBlocks1, cidrBlocks2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if subset {
+ t.Fatalf("expected CIDR blocks %q to not be a subset of CIDR blocks %q", cidrBlocks2, cidrBlocks1)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go b/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go
new file mode 100644
index 0000000..52b03d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go
@@ -0,0 +1,192 @@
+package compressutil
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/json"
+ "testing"
+)
+
+func TestCompressUtil_CompressDecompress(t *testing.T) {
+ input := map[string]interface{}{
+ "sample": "data",
+ "verification": "process",
+ }
+
+ // Encode input into JSON
+ var buf bytes.Buffer
+ enc := json.NewEncoder(&buf)
+ if err := enc.Encode(input); err != nil {
+ t.Fatal(err)
+ }
+
+ inputJSONBytes := buf.Bytes()
+ // Test nil configuration
+ if _, err := Compress(inputJSONBytes, nil); err == nil {
+ t.Fatal("expected an error")
+ }
+
+ // Test invalid configuration
+ if _, err := Compress(inputJSONBytes, &CompressionConfig{}); err == nil {
+ t.Fatal("expected an error")
+ }
+
+ // Compress input using lzw format
+ compressedJSONBytes, err := Compress(inputJSONBytes, &CompressionConfig{
+ Type: CompressionTypeLzw,
+ })
+ if err != nil {
+		t.Fatal(err)
+ }
+ if len(compressedJSONBytes) == 0 {
+ t.Fatal("failed to compress data in lzw format")
+ }
+	// Check the presence of the canary
+ if compressedJSONBytes[0] != CompressionCanaryLzw {
+ t.Fatalf("bad: compression canary: expected: %d actual: %d", CompressionCanaryLzw, compressedJSONBytes[0])
+ }
+
+ // Decompress the input and check the output
+ decompressedJSONBytes, uncompressed, err := Decompress(compressedJSONBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if uncompressed {
+ t.Fatal("failed to recognize compressed data")
+ }
+ if len(decompressedJSONBytes) == 0 {
+ t.Fatal("failed to decompress lzw formatted data")
+ }
+
+ if string(inputJSONBytes) != string(decompressedJSONBytes) {
+ t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
+ }
+
+ // Compress input using Gzip format, assume DefaultCompression
+ compressedJSONBytes, err = Compress(inputJSONBytes, &CompressionConfig{
+ Type: CompressionTypeGzip,
+ })
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(compressedJSONBytes) == 0 {
+		t.Fatal("failed to compress data in gzip format")
+ }
+	// Check the presence of the canary
+ if compressedJSONBytes[0] != CompressionCanaryGzip {
+ t.Fatalf("bad: compression canary: expected: %d actual: %d", CompressionCanaryGzip, compressedJSONBytes[0])
+ }
+
+ // Decompress the input and check the output
+ decompressedJSONBytes, uncompressed, err = Decompress(compressedJSONBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if uncompressed {
+ t.Fatal("failed to recognize compressed data")
+ }
+ if len(decompressedJSONBytes) == 0 {
+		t.Fatal("failed to decompress gzip formatted data")
+ }
+
+ if string(inputJSONBytes) != string(decompressedJSONBytes) {
+ t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
+ }
+
+ // Compress input using Gzip format: DefaultCompression
+ compressedJSONBytes, err = Compress(inputJSONBytes, &CompressionConfig{
+ Type: CompressionTypeGzip,
+ GzipCompressionLevel: gzip.DefaultCompression,
+ })
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(compressedJSONBytes) == 0 {
+		t.Fatal("failed to compress data in gzip format")
+ }
+	// Check the presence of the canary
+ if compressedJSONBytes[0] != CompressionCanaryGzip {
+ t.Fatalf("bad: compression canary: expected: %d actual: %d", CompressionCanaryGzip, compressedJSONBytes[0])
+ }
+
+ // Decompress the input and check the output
+ decompressedJSONBytes, uncompressed, err = Decompress(compressedJSONBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if uncompressed {
+ t.Fatal("failed to recognize compressed data")
+ }
+ if len(decompressedJSONBytes) == 0 {
+		t.Fatal("failed to decompress gzip formatted data")
+ }
+
+ if string(inputJSONBytes) != string(decompressedJSONBytes) {
+ t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
+ }
+
+ // Compress input using Gzip format, BestCompression
+ compressedJSONBytes, err = Compress(inputJSONBytes, &CompressionConfig{
+ Type: CompressionTypeGzip,
+ GzipCompressionLevel: gzip.BestCompression,
+ })
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(compressedJSONBytes) == 0 {
+		t.Fatal("failed to compress data in gzip format")
+ }
+	// Check the presence of the canary
+ if compressedJSONBytes[0] != CompressionCanaryGzip {
+ t.Fatalf("bad: compression canary: expected: %d actual: %d", CompressionCanaryGzip, compressedJSONBytes[0])
+ }
+
+ // Decompress the input and check the output
+ decompressedJSONBytes, uncompressed, err = Decompress(compressedJSONBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if uncompressed {
+ t.Fatal("failed to recognize compressed data")
+ }
+ if len(decompressedJSONBytes) == 0 {
+		t.Fatal("failed to decompress gzip formatted data")
+ }
+
+ if string(inputJSONBytes) != string(decompressedJSONBytes) {
+ t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
+ }
+
+ // Compress input using Gzip format, BestSpeed
+ compressedJSONBytes, err = Compress(inputJSONBytes, &CompressionConfig{
+ Type: CompressionTypeGzip,
+ GzipCompressionLevel: gzip.BestSpeed,
+ })
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(compressedJSONBytes) == 0 {
+		t.Fatal("failed to compress data in gzip format")
+ }
+	// Check the presence of the canary
+ if compressedJSONBytes[0] != CompressionCanaryGzip {
+ t.Fatalf("bad: compression canary: expected: %d actual: %d",
+ CompressionCanaryGzip, compressedJSONBytes[0])
+ }
+
+ // Decompress the input and check the output
+ decompressedJSONBytes, uncompressed, err = Decompress(compressedJSONBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if uncompressed {
+ t.Fatal("failed to recognize compressed data")
+ }
+ if len(decompressedJSONBytes) == 0 {
+		t.Fatal("failed to decompress gzip formatted data")
+ }
+
+ if string(inputJSONBytes) != string(decompressedJSONBytes) {
+ t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/consts/consts.go b/vendor/github.com/hashicorp/vault/helper/consts/consts.go
new file mode 100644
index 0000000..2ec952b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/consts/consts.go
@@ -0,0 +1,7 @@
+package consts
+
+const (
+	// ExpirationRestoreWorkerCount specifies the number of workers to use while
+ // restoring leases into the expiration manager
+ ExpirationRestoreWorkerCount = 64
+)
diff --git a/vendor/github.com/hashicorp/vault/helper/consts/error.go b/vendor/github.com/hashicorp/vault/helper/consts/error.go
new file mode 100644
index 0000000..d96ba4f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/consts/error.go
@@ -0,0 +1,13 @@
+package consts
+
+import "errors"
+
+var (
+ // ErrSealed is returned if an operation is performed on a sealed barrier.
+ // No operation is expected to succeed before unsealing
+ ErrSealed = errors.New("Vault is sealed")
+
+ // ErrStandby is returned if an operation is performed on a standby Vault.
+ // No operation is expected to succeed until active.
+ ErrStandby = errors.New("Vault is in standby mode")
+)
diff --git a/vendor/github.com/hashicorp/vault/helper/consts/replication.go b/vendor/github.com/hashicorp/vault/helper/consts/replication.go
new file mode 100644
index 0000000..62bbcb3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/consts/replication.go
@@ -0,0 +1,20 @@
+package consts
+
+type ReplicationState uint32
+
+const (
+ ReplicationDisabled ReplicationState = iota
+ ReplicationPrimary
+ ReplicationSecondary
+)
+
+func (r ReplicationState) String() string {
+ switch r {
+ case ReplicationSecondary:
+ return "secondary"
+ case ReplicationPrimary:
+ return "primary"
+ }
+
+ return "disabled"
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/flag-kv/flag.go b/vendor/github.com/hashicorp/vault/helper/flag-kv/flag.go
new file mode 100644
index 0000000..3e8a8f7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/flag-kv/flag.go
@@ -0,0 +1,29 @@
+package kvFlag
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Flag is a flag.Value implementation for parsing user variables
+// from the command-line in the format of '-var key=value'.
+type Flag map[string]string
+
+func (v *Flag) String() string {
+ return ""
+}
+
+func (v *Flag) Set(raw string) error {
+ idx := strings.Index(raw, "=")
+ if idx == -1 {
+ return fmt.Errorf("No '=' value in arg: %s", raw)
+ }
+
+ if *v == nil {
+ *v = make(map[string]string)
+ }
+
+ key, value := raw[0:idx], raw[idx+1:]
+ (*v)[key] = value
+ return nil
+}
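+
+// Illustrative registration with the standard flag package (an editorial
+// sketch, not part of the upstream source):
+//
+//	var vars Flag
+//	flag.Var(&vars, "var", "key=value pairs; may be repeated")
+//	// -var foo=bar -var baz=qux  =>  Flag{"foo": "bar", "baz": "qux"}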
diff --git a/vendor/github.com/hashicorp/vault/helper/flag-kv/flag_test.go b/vendor/github.com/hashicorp/vault/helper/flag-kv/flag_test.go
new file mode 100644
index 0000000..2fc88aa
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/flag-kv/flag_test.go
@@ -0,0 +1,56 @@
+package kvFlag
+
+import (
+ "flag"
+ "reflect"
+ "testing"
+)
+
+func TestFlag_impl(t *testing.T) {
+ var _ flag.Value = new(Flag)
+}
+
+func TestFlag(t *testing.T) {
+ cases := []struct {
+ Input string
+ Output map[string]string
+ Error bool
+ }{
+ {
+ "key=value",
+ map[string]string{"key": "value"},
+ false,
+ },
+
+ {
+ "key=",
+ map[string]string{"key": ""},
+ false,
+ },
+
+ {
+ "key=foo=bar",
+ map[string]string{"key": "foo=bar"},
+ false,
+ },
+
+ {
+ "key",
+ nil,
+ true,
+ },
+ }
+
+ for _, tc := range cases {
+ f := new(Flag)
+ err := f.Set(tc.Input)
+ if (err != nil) != tc.Error {
+ t.Fatalf("bad error. Input: %#v", tc.Input)
+ }
+
+ actual := map[string]string(*f)
+ if !reflect.DeepEqual(actual, tc.Output) {
+ t.Fatalf("bad: %#v", actual)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/flag-slice/flag.go b/vendor/github.com/hashicorp/vault/helper/flag-slice/flag.go
new file mode 100644
index 0000000..da75149
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/flag-slice/flag.go
@@ -0,0 +1,16 @@
+package sliceflag
+
+import "strings"
+
+// StringFlag implements the flag.Value interface and allows multiple
+// calls to the same variable to append a list.
+type StringFlag []string
+
+func (s *StringFlag) String() string {
+ return strings.Join(*s, ",")
+}
+
+func (s *StringFlag) Set(value string) error {
+ *s = append(*s, value)
+ return nil
+}
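+
+// Illustrative registration with the standard flag package (an editorial
+// sketch, not part of the upstream source):
+//
+//	var names StringFlag
+//	flag.Var(&names, "name", "may be repeated")
+//	// -name foo -name bar  =>  StringFlag{"foo", "bar"}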
diff --git a/vendor/github.com/hashicorp/vault/helper/flag-slice/flag_test.go b/vendor/github.com/hashicorp/vault/helper/flag-slice/flag_test.go
new file mode 100644
index 0000000..f72e1d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/flag-slice/flag_test.go
@@ -0,0 +1,33 @@
+package sliceflag
+
+import (
+ "flag"
+ "reflect"
+ "testing"
+)
+
+func TestStringFlag_implements(t *testing.T) {
+ var raw interface{}
+ raw = new(StringFlag)
+ if _, ok := raw.(flag.Value); !ok {
+ t.Fatalf("StringFlag should be a Value")
+ }
+}
+
+func TestStringFlagSet(t *testing.T) {
+ sv := new(StringFlag)
+ err := sv.Set("foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = sv.Set("bar")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := []string{"foo", "bar"}
+ if !reflect.DeepEqual([]string(*sv), expected) {
+ t.Fatalf("Bad: %#v", sv)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go b/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go
new file mode 100644
index 0000000..27ca3fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go
@@ -0,0 +1,262 @@
+// Code generated by protoc-gen-go.
+// source: types.proto
+// DO NOT EDIT!
+
+/*
+Package forwarding is a generated protocol buffer package.
+
+It is generated from these files:
+ types.proto
+
+It has these top-level messages:
+ Request
+ URL
+ HeaderEntry
+ Response
+*/
+package forwarding
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Request struct {
+ // Not used right now but reserving in case it turns out that streaming
+ // makes things more economical on the gRPC side
+ // uint64 id = 1;
+ Method string `protobuf:"bytes,2,opt,name=method" json:"method,omitempty"`
+ Url *URL `protobuf:"bytes,3,opt,name=url" json:"url,omitempty"`
+ HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Body []byte `protobuf:"bytes,5,opt,name=body,proto3" json:"body,omitempty"`
+ Host string `protobuf:"bytes,6,opt,name=host" json:"host,omitempty"`
+ RemoteAddr string `protobuf:"bytes,7,opt,name=remote_addr,json=remoteAddr" json:"remote_addr,omitempty"`
+ PeerCertificates [][]byte `protobuf:"bytes,8,rep,name=peer_certificates,json=peerCertificates,proto3" json:"peer_certificates,omitempty"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Request) GetMethod() string {
+ if m != nil {
+ return m.Method
+ }
+ return ""
+}
+
+func (m *Request) GetUrl() *URL {
+ if m != nil {
+ return m.Url
+ }
+ return nil
+}
+
+func (m *Request) GetHeaderEntries() map[string]*HeaderEntry {
+ if m != nil {
+ return m.HeaderEntries
+ }
+ return nil
+}
+
+func (m *Request) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *Request) GetHost() string {
+ if m != nil {
+ return m.Host
+ }
+ return ""
+}
+
+func (m *Request) GetRemoteAddr() string {
+ if m != nil {
+ return m.RemoteAddr
+ }
+ return ""
+}
+
+func (m *Request) GetPeerCertificates() [][]byte {
+ if m != nil {
+ return m.PeerCertificates
+ }
+ return nil
+}
+
+type URL struct {
+ Scheme string `protobuf:"bytes,1,opt,name=scheme" json:"scheme,omitempty"`
+ Opaque string `protobuf:"bytes,2,opt,name=opaque" json:"opaque,omitempty"`
+ // This isn't needed now but might be in the future, so we'll skip the
+ // number to keep the ordering in net/url
+ // UserInfo user = 3;
+ Host string `protobuf:"bytes,4,opt,name=host" json:"host,omitempty"`
+ Path string `protobuf:"bytes,5,opt,name=path" json:"path,omitempty"`
+ RawPath string `protobuf:"bytes,6,opt,name=raw_path,json=rawPath" json:"raw_path,omitempty"`
+ // This also isn't needed right now, but we'll reserve the number
+ // bool force_query = 7;
+ RawQuery string `protobuf:"bytes,8,opt,name=raw_query,json=rawQuery" json:"raw_query,omitempty"`
+ Fragment string `protobuf:"bytes,9,opt,name=fragment" json:"fragment,omitempty"`
+}
+
+func (m *URL) Reset() { *m = URL{} }
+func (m *URL) String() string { return proto.CompactTextString(m) }
+func (*URL) ProtoMessage() {}
+func (*URL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *URL) GetScheme() string {
+ if m != nil {
+ return m.Scheme
+ }
+ return ""
+}
+
+func (m *URL) GetOpaque() string {
+ if m != nil {
+ return m.Opaque
+ }
+ return ""
+}
+
+func (m *URL) GetHost() string {
+ if m != nil {
+ return m.Host
+ }
+ return ""
+}
+
+func (m *URL) GetPath() string {
+ if m != nil {
+ return m.Path
+ }
+ return ""
+}
+
+func (m *URL) GetRawPath() string {
+ if m != nil {
+ return m.RawPath
+ }
+ return ""
+}
+
+func (m *URL) GetRawQuery() string {
+ if m != nil {
+ return m.RawQuery
+ }
+ return ""
+}
+
+func (m *URL) GetFragment() string {
+ if m != nil {
+ return m.Fragment
+ }
+ return ""
+}
+
+type HeaderEntry struct {
+ Values []string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
+}
+
+func (m *HeaderEntry) Reset() { *m = HeaderEntry{} }
+func (m *HeaderEntry) String() string { return proto.CompactTextString(m) }
+func (*HeaderEntry) ProtoMessage() {}
+func (*HeaderEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *HeaderEntry) GetValues() []string {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Response struct {
+ // Not used right now but reserving in case it turns out that streaming
+ // makes things more economical on the gRPC side
+ // uint64 id = 1;
+ StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode" json:"status_code,omitempty"`
+ Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
+ // Added in 0.6.2 to ensure that the content-type is set appropriately, as
+ // well as any other information
+ HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Response) Reset() { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage() {}
+func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *Response) GetStatusCode() uint32 {
+ if m != nil {
+ return m.StatusCode
+ }
+ return 0
+}
+
+func (m *Response) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *Response) GetHeaderEntries() map[string]*HeaderEntry {
+ if m != nil {
+ return m.HeaderEntries
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Request)(nil), "forwarding.Request")
+ proto.RegisterType((*URL)(nil), "forwarding.URL")
+ proto.RegisterType((*HeaderEntry)(nil), "forwarding.HeaderEntry")
+ proto.RegisterType((*Response)(nil), "forwarding.Response")
+}
+
+func init() { proto.RegisterFile("types.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 437 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0xc1, 0x6e, 0xd4, 0x30,
+ 0x10, 0x95, 0xd7, 0xdb, 0xdd, 0x64, 0xd2, 0x42, 0xf1, 0x01, 0x4c, 0x39, 0x10, 0x56, 0x02, 0x22,
+ 0x21, 0xf6, 0xb0, 0x5c, 0x10, 0x37, 0x54, 0x21, 0x71, 0x28, 0x08, 0x2c, 0xf5, 0xc0, 0x29, 0x72,
+ 0xd7, 0xb3, 0xcd, 0x8a, 0x26, 0xce, 0xda, 0x0e, 0xab, 0x7c, 0x16, 0xff, 0xc4, 0x89, 0xaf, 0x40,
+ 0xb6, 0x43, 0x1b, 0x84, 0x10, 0xa7, 0x9e, 0x76, 0xde, 0x7b, 0xb3, 0xe3, 0x79, 0x33, 0x13, 0xc8,
+ 0x5c, 0xdf, 0xa2, 0x5d, 0xb6, 0x46, 0x3b, 0xcd, 0x60, 0xa3, 0xcd, 0x5e, 0x1a, 0xb5, 0x6d, 0x2e,
+ 0x17, 0x3f, 0x26, 0x30, 0x17, 0xb8, 0xeb, 0xd0, 0x3a, 0x76, 0x1f, 0x66, 0x35, 0xba, 0x4a, 0x2b,
+ 0x3e, 0xc9, 0x49, 0x91, 0x8a, 0x01, 0xb1, 0x27, 0x40, 0x3b, 0x73, 0xc5, 0x69, 0x4e, 0x8a, 0x6c,
+ 0x75, 0x77, 0x79, 0xf3, 0xef, 0xe5, 0xb9, 0x38, 0x13, 0x5e, 0x63, 0x1f, 0xe0, 0x4e, 0x85, 0x52,
+ 0xa1, 0x29, 0xb1, 0x71, 0x66, 0x8b, 0x96, 0x4f, 0x73, 0x5a, 0x64, 0xab, 0x67, 0xe3, 0xec, 0xe1,
+ 0x9d, 0xe5, 0xfb, 0x90, 0xf9, 0x2e, 0x26, 0xfa, 0x9f, 0x5e, 0x1c, 0x55, 0x63, 0x8e, 0x31, 0x98,
+ 0x5e, 0x68, 0xd5, 0xf3, 0x83, 0x9c, 0x14, 0x87, 0x22, 0xc4, 0x9e, 0xab, 0xb4, 0x75, 0x7c, 0x16,
+ 0x7a, 0x0b, 0x31, 0x7b, 0x0c, 0x99, 0xc1, 0x5a, 0x3b, 0x2c, 0xa5, 0x52, 0x86, 0xcf, 0x83, 0x04,
+ 0x91, 0x7a, 0xab, 0x94, 0x61, 0x2f, 0xe0, 0x5e, 0x8b, 0x68, 0xca, 0x35, 0x1a, 0xb7, 0xdd, 0x6c,
+ 0xd7, 0xd2, 0xa1, 0xe5, 0x49, 0x4e, 0x8b, 0x43, 0x71, 0xec, 0x85, 0xd3, 0x11, 0x7f, 0xf2, 0x05,
+ 0xd8, 0xdf, 0xad, 0xb1, 0x63, 0xa0, 0x5f, 0xb1, 0xe7, 0x24, 0xd4, 0xf6, 0x21, 0x7b, 0x09, 0x07,
+ 0xdf, 0xe4, 0x55, 0x87, 0x61, 0x4c, 0xd9, 0xea, 0xc1, 0xd8, 0xe3, 0x4d, 0x81, 0x5e, 0xc4, 0xac,
+ 0x37, 0x93, 0xd7, 0x64, 0xf1, 0x9d, 0x00, 0x3d, 0x17, 0x67, 0x7e, 0xc4, 0x76, 0x5d, 0x61, 0x8d,
+ 0x43, 0xbd, 0x01, 0x79, 0x5e, 0xb7, 0x72, 0x37, 0xd4, 0x4c, 0xc5, 0x80, 0xae, 0x4d, 0x4f, 0x47,
+ 0xa6, 0x19, 0x4c, 0x5b, 0xe9, 0xaa, 0x30, 0x9c, 0x54, 0x84, 0x98, 0x3d, 0x84, 0xc4, 0xc8, 0x7d,
+ 0x19, 0xf8, 0x38, 0xa0, 0xb9, 0x91, 0xfb, 0x4f, 0x5e, 0x7a, 0x04, 0xa9, 0x97, 0x76, 0x1d, 0x9a,
+ 0x9e, 0x27, 0x41, 0xf3, 0xb9, 0x9f, 0x3d, 0x66, 0x27, 0x90, 0x6c, 0x8c, 0xbc, 0xac, 0xb1, 0x71,
+ 0x3c, 0x8d, 0xda, 0x6f, 0xbc, 0x78, 0x0a, 0xd9, 0xc8, 0x8d, 0x6f, 0x31, 0xf8, 0xb1, 0x9c, 0xe4,
+ 0xd4, 0xb7, 0x18, 0xd1, 0xe2, 0x27, 0x81, 0x44, 0xa0, 0x6d, 0x75, 0x63, 0xd1, 0x2f, 0xc4, 0x3a,
+ 0xe9, 0x3a, 0x5b, 0xae, 0xb5, 0x8a, 0x66, 0x8e, 0x04, 0x44, 0xea, 0x54, 0x2b, 0xbc, 0xde, 0x2c,
+ 0x1d, 0x6d, 0xf6, 0xe3, 0x3f, 0x8e, 0xe7, 0xf9, 0x9f, 0xc7, 0x13, 0x9f, 0xf8, 0xff, 0xf5, 0xdc,
+ 0xe2, 0x1e, 0x2f, 0x66, 0xe1, 0x0b, 0x7a, 0xf5, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x57, 0x73, 0xdf,
+ 0x6b, 0x50, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/types.proto b/vendor/github.com/hashicorp/vault/helper/forwarding/types.proto
new file mode 100644
index 0000000..02c3518
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/forwarding/types.proto
@@ -0,0 +1,46 @@
+syntax = "proto3";
+
+package forwarding;
+
+message Request {
+ // Not used right now but reserving in case it turns out that streaming
+ // makes things more economical on the gRPC side
+ //uint64 id = 1;
+ string method = 2;
+ URL url = 3;
+	map<string, HeaderEntry> header_entries = 4;
+ bytes body = 5;
+ string host = 6;
+ string remote_addr = 7;
+ repeated bytes peer_certificates = 8;
+}
+
+message URL {
+ string scheme = 1;
+ string opaque = 2;
+ // This isn't needed now but might be in the future, so we'll skip the
+ // number to keep the ordering in net/url
+ //UserInfo user = 3;
+ string host = 4;
+ string path = 5;
+ string raw_path = 6;
+ // This also isn't needed right now, but we'll reserve the number
+ //bool force_query = 7;
+ string raw_query = 8;
+ string fragment = 9;
+}
+
+message HeaderEntry {
+ repeated string values = 1;
+}
+
+message Response {
+ // Not used right now but reserving in case it turns out that streaming
+ // makes things more economical on the gRPC side
+ //uint64 id = 1;
+ uint32 status_code = 2;
+ bytes body = 3;
+ // Added in 0.6.2 to ensure that the content-type is set appropriately, as
+ // well as any other information
+	map<string, HeaderEntry> header_entries = 4;
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/util.go b/vendor/github.com/hashicorp/vault/helper/forwarding/util.go
new file mode 100644
index 0000000..92e6cb1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/forwarding/util.go
@@ -0,0 +1,203 @@
+package forwarding
+
+import (
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "net/http"
+ "net/url"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hashicorp/vault/helper/compressutil"
+ "github.com/hashicorp/vault/helper/jsonutil"
+)
+
+type bufCloser struct {
+ *bytes.Buffer
+}
+
+func (b bufCloser) Close() error {
+ b.Reset()
+ return nil
+}
+
+// GenerateForwardedHTTPRequest generates a new http.Request that contains the
+// original request's information in the new request's body.
+func GenerateForwardedHTTPRequest(req *http.Request, addr string) (*http.Request, error) {
+ fq, err := GenerateForwardedRequest(req)
+ if err != nil {
+ return nil, err
+ }
+
+ var newBody []byte
+ switch os.Getenv("VAULT_MESSAGE_TYPE") {
+ case "json":
+ newBody, err = jsonutil.EncodeJSON(fq)
+ case "json_compress":
+ newBody, err = jsonutil.EncodeJSONAndCompress(fq, &compressutil.CompressionConfig{
+ Type: compressutil.CompressionTypeLzw,
+ })
+ case "proto3":
+ fallthrough
+ default:
+ newBody, err = proto.Marshal(fq)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ ret, err := http.NewRequest("POST", addr, bytes.NewBuffer(newBody))
+ if err != nil {
+ return nil, err
+ }
+
+ return ret, nil
+}
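+
+// The wire encoding above is selected via the VAULT_MESSAGE_TYPE environment
+// variable and defaults to proto3. An illustrative sketch (editorial, not
+// part of the upstream source; the address is hypothetical):
+//
+//	os.Setenv("VAULT_MESSAGE_TYPE", "json_compress")
+//	fwdReq, err := GenerateForwardedHTTPRequest(req, "https://active.node:8201")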
+
+func GenerateForwardedRequest(req *http.Request) (*Request, error) {
+ fq := Request{
+ Method: req.Method,
+ HeaderEntries: make(map[string]*HeaderEntry, len(req.Header)),
+ Host: req.Host,
+ RemoteAddr: req.RemoteAddr,
+ }
+
+ reqURL := req.URL
+ fq.Url = &URL{
+ Scheme: reqURL.Scheme,
+ Opaque: reqURL.Opaque,
+ Host: reqURL.Host,
+ Path: reqURL.Path,
+ RawPath: reqURL.RawPath,
+ RawQuery: reqURL.RawQuery,
+ Fragment: reqURL.Fragment,
+ }
+
+ for k, v := range req.Header {
+ fq.HeaderEntries[k] = &HeaderEntry{
+ Values: v,
+ }
+ }
+
+ buf := bytes.NewBuffer(nil)
+ _, err := buf.ReadFrom(req.Body)
+ if err != nil {
+ return nil, err
+ }
+ fq.Body = buf.Bytes()
+
+ if req.TLS != nil && req.TLS.PeerCertificates != nil && len(req.TLS.PeerCertificates) > 0 {
+ fq.PeerCertificates = make([][]byte, len(req.TLS.PeerCertificates))
+ for i, cert := range req.TLS.PeerCertificates {
+ fq.PeerCertificates[i] = cert.Raw
+ }
+ }
+
+ return &fq, nil
+}
+
+// ParseForwardedHTTPRequest generates a new http.Request composed of the
+// values in the given request's body, assuming the body correctly parses
+// into a forwarded Request.
+func ParseForwardedHTTPRequest(req *http.Request) (*http.Request, error) {
+ buf := bytes.NewBuffer(nil)
+ _, err := buf.ReadFrom(req.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ fq := new(Request)
+ switch os.Getenv("VAULT_MESSAGE_TYPE") {
+ case "json", "json_compress":
+ err = jsonutil.DecodeJSON(buf.Bytes(), fq)
+ default:
+ err = proto.Unmarshal(buf.Bytes(), fq)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return ParseForwardedRequest(fq)
+}
+
+func ParseForwardedRequest(fq *Request) (*http.Request, error) {
+ buf := bufCloser{
+ Buffer: bytes.NewBuffer(fq.Body),
+ }
+
+ ret := &http.Request{
+ Method: fq.Method,
+ Header: make(map[string][]string, len(fq.HeaderEntries)),
+ Body: buf,
+ Host: fq.Host,
+ RemoteAddr: fq.RemoteAddr,
+ }
+
+ ret.URL = &url.URL{
+ Scheme: fq.Url.Scheme,
+ Opaque: fq.Url.Opaque,
+ Host: fq.Url.Host,
+ Path: fq.Url.Path,
+ RawPath: fq.Url.RawPath,
+ RawQuery: fq.Url.RawQuery,
+ Fragment: fq.Url.Fragment,
+ }
+
+ for k, v := range fq.HeaderEntries {
+ ret.Header[k] = v.Values
+ }
+
+ if fq.PeerCertificates != nil && len(fq.PeerCertificates) > 0 {
+ ret.TLS = &tls.ConnectionState{
+ PeerCertificates: make([]*x509.Certificate, len(fq.PeerCertificates)),
+ }
+ for i, certBytes := range fq.PeerCertificates {
+ cert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ ret.TLS.PeerCertificates[i] = cert
+ }
+ }
+
+ return ret, nil
+}
+
+type RPCResponseWriter struct {
+ statusCode int
+ header http.Header
+ body *bytes.Buffer
+}
+
+// NewRPCResponseWriter returns an initialized RPCResponseWriter
+func NewRPCResponseWriter() *RPCResponseWriter {
+ w := &RPCResponseWriter{
+ header: make(http.Header),
+ body: new(bytes.Buffer),
+ statusCode: 200,
+ }
+ //w.header.Set("Content-Type", "application/octet-stream")
+ return w
+}
+
+func (w *RPCResponseWriter) Header() http.Header {
+ return w.header
+}
+
+func (w *RPCResponseWriter) Write(buf []byte) (int, error) {
+ w.body.Write(buf)
+ return len(buf), nil
+}
+
+func (w *RPCResponseWriter) WriteHeader(code int) {
+ w.statusCode = code
+}
+
+func (w *RPCResponseWriter) StatusCode() int {
+ return w.statusCode
+}
+
+func (w *RPCResponseWriter) Body() *bytes.Buffer {
+ return w.body
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/util_test.go b/vendor/github.com/hashicorp/vault/helper/forwarding/util_test.go
new file mode 100644
index 0000000..0af2b89
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/forwarding/util_test.go
@@ -0,0 +1,126 @@
+package forwarding
+
+import (
+ "bufio"
+ "bytes"
+ "net/http"
+ "os"
+ "reflect"
+ "testing"
+)
+
+func Test_ForwardedRequest_GenerateParse(t *testing.T) {
+ testForwardedRequestGenerateParse(t)
+}
+
+func Benchmark_ForwardedRequest_GenerateParse_JSON(b *testing.B) {
+ os.Setenv("VAULT_MESSAGE_TYPE", "json")
+ var totalSize int64
+ var numRuns int64
+ for i := 0; i < b.N; i++ {
+ totalSize += testForwardedRequestGenerateParse(b)
+ numRuns++
+ }
+ b.Logf("message size per op: %d", totalSize/numRuns)
+}
+
+func Benchmark_ForwardedRequest_GenerateParse_JSON_Compressed(b *testing.B) {
+ os.Setenv("VAULT_MESSAGE_TYPE", "json_compress")
+ var totalSize int64
+ var numRuns int64
+ for i := 0; i < b.N; i++ {
+ totalSize += testForwardedRequestGenerateParse(b)
+ numRuns++
+ }
+ b.Logf("message size per op: %d", totalSize/numRuns)
+}
+
+func Benchmark_ForwardedRequest_GenerateParse_Proto3(b *testing.B) {
+ os.Setenv("VAULT_MESSAGE_TYPE", "proto3")
+ var totalSize int64
+ var numRuns int64
+ for i := 0; i < b.N; i++ {
+ totalSize += testForwardedRequestGenerateParse(b)
+ numRuns++
+ }
+ b.Logf("message size per op: %d", totalSize/numRuns)
+}
+
+func testForwardedRequestGenerateParse(t testing.TB) int64 {
+ bodBuf := bytes.NewReader([]byte(`{ "foo": "bar", "zip": { "argle": "bargle", neet: 0 } }`))
+ req, err := http.NewRequest("FOOBAR", "https://pushit.real.good:9281/snicketysnack?furbleburble=bloopetybloop", bodBuf)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // We want to get the fields we would expect from an incoming request, so
+ // we write it out and then read it again
+ buf1 := bytes.NewBuffer(nil)
+ err = req.Write(buf1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Read it back in, parsing like a server
+ bufr1 := bufio.NewReader(buf1)
+ initialReq, err := http.ReadRequest(bufr1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Generate the request with the forwarded request in the body
+ req, err = GenerateForwardedHTTPRequest(initialReq, "https://bloopety.bloop:8201")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Perform another "round trip"
+ buf2 := bytes.NewBuffer(nil)
+ err = req.Write(buf2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ size := int64(buf2.Len())
+ bufr2 := bufio.NewReader(buf2)
+ intreq, err := http.ReadRequest(bufr2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Now extract the forwarded request to generate a final request for processing
+ finalReq, err := ParseForwardedHTTPRequest(intreq)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ switch {
+ case initialReq.Method != finalReq.Method:
+ t.Fatalf("bad method:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq)
+ case initialReq.RemoteAddr != finalReq.RemoteAddr:
+ t.Fatalf("bad remoteaddr:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq)
+ case initialReq.Host != finalReq.Host:
+ t.Fatalf("bad host:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq)
+ case !reflect.DeepEqual(initialReq.URL, finalReq.URL):
+ t.Fatalf("bad url:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq.URL, *finalReq.URL)
+ case !reflect.DeepEqual(initialReq.Header, finalReq.Header):
+ t.Fatalf("bad header:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq)
+ default:
+ // Compare bodies
+ bodBuf.Seek(0, 0)
+ initBuf := bytes.NewBuffer(nil)
+ _, err = initBuf.ReadFrom(bodBuf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ finBuf := bytes.NewBuffer(nil)
+ _, err = finBuf.ReadFrom(finalReq.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(initBuf.Bytes(), finBuf.Bytes()) {
+			t.Fatalf("bad body:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", initBuf.Bytes(), finBuf.Bytes())
+ }
+ }
+
+ return size
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/gated-writer/writer.go b/vendor/github.com/hashicorp/vault/helper/gated-writer/writer.go
new file mode 100644
index 0000000..9c5aeba
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/gated-writer/writer.go
@@ -0,0 +1,43 @@
+package gatedwriter
+
+import (
+ "io"
+ "sync"
+)
+
+// Writer is an io.Writer implementation that buffers all of its
+// data into an internal buffer until it is told to let data through.
+type Writer struct {
+ Writer io.Writer
+
+ buf [][]byte
+ flush bool
+ lock sync.RWMutex
+}
+
+// Flush tells the Writer to flush any buffered data and to stop
+// buffering.
+func (w *Writer) Flush() {
+ w.lock.Lock()
+ w.flush = true
+ w.lock.Unlock()
+
+ for _, p := range w.buf {
+ w.Write(p)
+ }
+ w.buf = nil
+}
+
+func (w *Writer) Write(p []byte) (n int, err error) {
+ w.lock.RLock()
+ defer w.lock.RUnlock()
+
+ if w.flush {
+ return w.Writer.Write(p)
+ }
+
+ p2 := make([]byte, len(p))
+ copy(p2, p)
+ w.buf = append(w.buf, p2)
+ return len(p), nil
+}
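+
+// Illustrative usage (an editorial sketch, not part of the upstream source):
+// writes are buffered until Flush is called, after which they pass straight
+// through to the underlying writer.
+//
+//	gw := &Writer{Writer: os.Stderr}
+//	gw.Write([]byte("held back\n")) // buffered, nothing emitted yet
+//	gw.Flush()                      // emits buffered data; later writes pass through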
diff --git a/vendor/github.com/hashicorp/vault/helper/gated-writer/writer_test.go b/vendor/github.com/hashicorp/vault/helper/gated-writer/writer_test.go
new file mode 100644
index 0000000..b007ef1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/gated-writer/writer_test.go
@@ -0,0 +1,34 @@
+package gatedwriter
+
+import (
+ "bytes"
+ "io"
+ "testing"
+)
+
+func TestWriter_impl(t *testing.T) {
+ var _ io.Writer = new(Writer)
+}
+
+func TestWriter(t *testing.T) {
+ buf := new(bytes.Buffer)
+ w := &Writer{Writer: buf}
+ w.Write([]byte("foo\n"))
+ w.Write([]byte("bar\n"))
+
+ if buf.String() != "" {
+ t.Fatalf("bad: %s", buf.String())
+ }
+
+ w.Flush()
+
+ if buf.String() != "foo\nbar\n" {
+ t.Fatalf("bad: %s", buf.String())
+ }
+
+ w.Write([]byte("baz\n"))
+
+ if buf.String() != "foo\nbar\nbaz\n" {
+ t.Fatalf("bad: %s", buf.String())
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/jsonutil/json_test.go b/vendor/github.com/hashicorp/vault/helper/jsonutil/json_test.go
new file mode 100644
index 0000000..53d4adf
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/jsonutil/json_test.go
@@ -0,0 +1,141 @@
+package jsonutil
+
+import (
+ "bytes"
+ "compress/gzip"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/compressutil"
+)
+
+func TestJSONUtil_CompressDecompressJSON(t *testing.T) {
+ expected := map[string]interface{}{
+ "test": "data",
+ "validation": "process",
+ }
+
+ // Compress an object
+ compressedBytes, err := EncodeJSONAndCompress(expected, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(compressedBytes) == 0 {
+ t.Fatal("expected compressed data")
+ }
+
+ // Check if canary is present in the compressed data
+ if compressedBytes[0] != compressutil.CompressionCanaryGzip {
+ t.Fatalf("canary missing in compressed data")
+ }
+
+ // Decompress and decode the compressed information and verify the functional
+ // behavior
+ var actual map[string]interface{}
+ if err = DecodeJSON(compressedBytes, &actual); err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+ }
+	for key := range actual {
+ delete(actual, key)
+ }
+
+ // Test invalid data
+ if err = DecodeJSON([]byte{}, &actual); err == nil {
+ t.Fatalf("expected a failure")
+ }
+
+ // Test invalid data after the canary byte
+ var buf bytes.Buffer
+ buf.Write([]byte{compressutil.CompressionCanaryGzip})
+ if err = DecodeJSON(buf.Bytes(), &actual); err == nil {
+ t.Fatalf("expected a failure")
+ }
+
+ // Compress an object
+ compressedBytes, err = EncodeJSONAndCompress(expected, &compressutil.CompressionConfig{
+ Type: compressutil.CompressionTypeGzip,
+ GzipCompressionLevel: gzip.BestSpeed,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(compressedBytes) == 0 {
+ t.Fatal("expected compressed data")
+ }
+
+ // Check if canary is present in the compressed data
+ if compressedBytes[0] != compressutil.CompressionCanaryGzip {
+ t.Fatalf("canary missing in compressed data")
+ }
+
+ // Decompress and decode the compressed information and verify the functional
+ // behavior
+ if err = DecodeJSON(compressedBytes, &actual); err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+ }
+}
+
+func TestJSONUtil_EncodeJSON(t *testing.T) {
+ input := map[string]interface{}{
+ "test": "data",
+ "validation": "process",
+ }
+
+ actualBytes, err := EncodeJSON(input)
+ if err != nil {
+ t.Fatalf("failed to encode JSON: %v", err)
+ }
+
+ actual := strings.TrimSpace(string(actualBytes))
+ expected := `{"test":"data","validation":"process"}`
+
+ if actual != expected {
+ t.Fatalf("bad: encoded JSON: expected:%s\nactual:%s\n", expected, string(actualBytes))
+ }
+}
+
+func TestJSONUtil_DecodeJSON(t *testing.T) {
+ input := `{"test":"data","validation":"process"}`
+
+ var actual map[string]interface{}
+
+ err := DecodeJSON([]byte(input), &actual)
+ if err != nil {
+		t.Fatalf("failed to decode JSON: %v", err)
+ }
+
+ expected := map[string]interface{}{
+ "test": "data",
+ "validation": "process",
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+ }
+}
+
+func TestJSONUtil_DecodeJSONFromReader(t *testing.T) {
+ input := `{"test":"data","validation":"process"}`
+
+ var actual map[string]interface{}
+
+ err := DecodeJSONFromReader(bytes.NewReader([]byte(input)), &actual)
+ if err != nil {
+		t.Fatalf("failed to decode JSON: %v", err)
+ }
+
+ expected := map[string]interface{}{
+ "test": "data",
+ "validation": "process",
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/kdf/kdf.go b/vendor/github.com/hashicorp/vault/helper/kdf/kdf.go
new file mode 100644
index 0000000..5009074
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/kdf/kdf.go
@@ -0,0 +1,77 @@
+// This package is used to implement Key Derivation Functions (KDF)
+// based on the recommendations of NIST SP 800-108. These are useful
+// for generating unique-per-transaction keys, or situations in which
+// a key hierarchy may be useful.
+package kdf
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/binary"
+ "fmt"
+)
+
+// PRF is a pseudo-random function that takes a key or seed,
+// as well as additional binary data and generates output that is
+// indistinguishable from random. Examples are cryptographic hash
+// functions or block ciphers.
+type PRF func([]byte, []byte) ([]byte, error)
+
+// CounterMode implements the counter-mode KDF, which uses a pseudo-random
+// function (PRF) along with a counter to generate derived keys. The KDF takes
+// a base key, a derivation context, and the required number of output bits.
+func CounterMode(prf PRF, prfLen uint32, key []byte, context []byte, bits uint32) ([]byte, error) {
+ // Ensure the PRF is byte aligned
+ if prfLen%8 != 0 {
+ return nil, fmt.Errorf("PRF must be byte aligned")
+ }
+
+ // Ensure the bits required are byte aligned
+ if bits%8 != 0 {
+ return nil, fmt.Errorf("bits required must be byte aligned")
+ }
+
+ // Determine the number of rounds required
+ rounds := bits / prfLen
+ if bits%prfLen != 0 {
+ rounds++
+ }
+
+ // Allocate and setup the input
+ input := make([]byte, 4+len(context)+4)
+ copy(input[4:], context)
+ binary.BigEndian.PutUint32(input[4+len(context):], bits)
+
+ // Iteratively generate more key material
+ var out []byte
+ var i uint32
+ for i = 0; i < rounds; i++ {
+ // Update the counter in the input string
+ binary.BigEndian.PutUint32(input[:4], i)
+
+ // Compute more key material
+ part, err := prf(key, input)
+ if err != nil {
+ return nil, err
+ }
+ if uint32(len(part)*8) != prfLen {
+ return nil, fmt.Errorf("PRF length mis-match (%d vs %d)", len(part)*8, prfLen)
+ }
+ out = append(out, part...)
+ }
+
+ // Return the desired number of output bytes
+ return out[:bits/8], nil
+}
+
+const (
+ // HMACSHA256PRFLen is the length of output from HMACSHA256PRF
+ HMACSHA256PRFLen uint32 = 256
+)
+
+// HMACSHA256PRF is a pseudo-random-function (PRF) that uses an HMAC-SHA256
+func HMACSHA256PRF(key []byte, data []byte) ([]byte, error) {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil), nil
+}
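+
+// Illustrative derivation of a 256-bit key (an editorial sketch, not part of
+// the upstream source; baseKey stands in for a caller-supplied secret):
+//
+//	derived, err := CounterMode(HMACSHA256PRF, HMACSHA256PRFLen,
+//		baseKey, []byte("encryption context"), 256)
+//	// on success, derived is 32 bytes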
diff --git a/vendor/github.com/hashicorp/vault/helper/kdf/kdf_test.go b/vendor/github.com/hashicorp/vault/helper/kdf/kdf_test.go
new file mode 100644
index 0000000..120c903
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/kdf/kdf_test.go
@@ -0,0 +1,72 @@
+package kdf
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestCounterMode(t *testing.T) {
+ key := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+ context := []byte("the quick brown fox")
+ prf := HMACSHA256PRF
+ prfLen := HMACSHA256PRFLen
+
+ // Expect256 was generated in python with
+ // import hashlib, hmac
+ // hash = hashlib.sha256
+ // context = "the quick brown fox"
+ // key = "".join([chr(x) for x in range(1, 17)])
+ // inp = "\x00\x00\x00\x00"+context+"\x00\x00\x01\x00"
+ // digest = hmac.HMAC(key, inp, hash).digest()
+ // print [ord(x) for x in digest]
+ expect256 := []byte{219, 25, 238, 6, 185, 236, 180, 64, 248, 152, 251,
+ 153, 79, 5, 141, 222, 66, 200, 66, 143, 40, 3, 101, 221, 206, 163, 102,
+ 80, 88, 234, 87, 157}
+
+ for _, l := range []uint32{128, 256, 384, 1024} {
+ out, err := CounterMode(prf, prfLen, key, context, l)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if uint32(len(out)*8) != l {
+ t.Fatalf("bad length: %#v", out)
+ }
+
+ if bytes.Contains(out, key) {
+ t.Fatalf("output contains key")
+ }
+
+ if l == 256 && !bytes.Equal(out, expect256) {
+ t.Fatalf("mis-match")
+ }
+ }
+
+}
+
+func TestHMACSHA256PRF(t *testing.T) {
+ key := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+ data := []byte("foobarbaz")
+ out, err := HMACSHA256PRF(key, data)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if uint32(len(out)*8) != HMACSHA256PRFLen {
+ t.Fatalf("Bad len")
+ }
+
+ // Expect was generated in python with:
+ // import hashlib, hmac
+ // hash = hashlib.sha256
+ // msg = "foobarbaz"
+ // key = "".join([chr(x) for x in range(1, 17)])
+ // hm = hmac.HMAC(key, msg, hash)
+ // print [ord(x) for x in hm.digest()]
+ expect := []byte{9, 50, 146, 8, 188, 130, 150, 107, 205, 147, 82, 170,
+ 253, 183, 26, 38, 167, 194, 220, 111, 56, 118, 219, 209, 31, 52, 137,
+ 90, 246, 133, 191, 124}
+ if !bytes.Equal(expect, out) {
+ t.Fatalf("mis-matched output")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go b/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go
new file mode 100644
index 0000000..e0bdd64
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go
@@ -0,0 +1,391 @@
+package keysutil
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+const (
+ shared = false
+ exclusive = true
+)
+
+var (
+ errNeedExclusiveLock = errors.New("an exclusive lock is needed for this operation")
+)
+
+// PolicyRequest holds values used when requesting a policy. Most values are
+// only used during an upsert.
+type PolicyRequest struct {
+ // The storage to use
+ Storage logical.Storage
+
+ // The name of the policy
+ Name string
+
+ // The key type
+ KeyType KeyType
+
+ // Whether it should be derived
+ Derived bool
+
+ // Whether to enable convergent encryption
+ Convergent bool
+
+ // Whether to allow export
+ Exportable bool
+
+ // Whether to upsert
+ Upsert bool
+}
+
+type LockManager struct {
+ // A lock for each named key
+ locks map[string]*sync.RWMutex
+
+ // A mutex for the map itself
+ locksMutex sync.RWMutex
+
+ // If caching is enabled, the map of name to in-memory policy cache
+ cache map[string]*Policy
+
+ // Used for global locking, and as the cache map mutex
+ cacheMutex sync.RWMutex
+}
+
+func NewLockManager(cacheDisabled bool) *LockManager {
+ lm := &LockManager{
+ locks: map[string]*sync.RWMutex{},
+ }
+ if !cacheDisabled {
+ lm.cache = map[string]*Policy{}
+ }
+ return lm
+}
+
+func (lm *LockManager) CacheActive() bool {
+ return lm.cache != nil
+}
+
+func (lm *LockManager) InvalidatePolicy(name string) {
+ // Check if it's in our cache. If so, return right away.
+ if lm.CacheActive() {
+ lm.cacheMutex.Lock()
+ defer lm.cacheMutex.Unlock()
+ delete(lm.cache, name)
+ }
+}
+
+func (lm *LockManager) policyLock(name string, lockType bool) *sync.RWMutex {
+ lm.locksMutex.RLock()
+ lock := lm.locks[name]
+ if lock != nil {
+ // We want to give this up before locking the lock, but it's safe --
+ // the only time we ever write to a value in this map is the first time
+ // we access the value, so it won't be changing out from under us
+ lm.locksMutex.RUnlock()
+ if lockType == exclusive {
+ lock.Lock()
+ } else {
+ lock.RLock()
+ }
+ return lock
+ }
+
+ lm.locksMutex.RUnlock()
+ lm.locksMutex.Lock()
+
+ // Don't defer the unlock call because if we get a valid lock below we want
+ // to release the lock mutex right away to avoid the possibility of
+ // deadlock by trying to grab the second lock
+
+ // Check to make sure it hasn't been created since
+ lock = lm.locks[name]
+ if lock != nil {
+ lm.locksMutex.Unlock()
+ if lockType == exclusive {
+ lock.Lock()
+ } else {
+ lock.RLock()
+ }
+ return lock
+ }
+
+ lock = &sync.RWMutex{}
+ lm.locks[name] = lock
+ lm.locksMutex.Unlock()
+ if lockType == exclusive {
+ lock.Lock()
+ } else {
+ lock.RLock()
+ }
+
+ return lock
+}
+
+func (lm *LockManager) UnlockPolicy(lock *sync.RWMutex, lockType bool) {
+ if lockType == exclusive {
+ lock.Unlock()
+ } else {
+ lock.RUnlock()
+ }
+}
+
+// Get the policy with a read lock. If we get an error saying an exclusive lock
+// is needed (for instance, for an upgrade/migration), give up the read lock,
+// call again with an exclusive lock, then swap back out for a read lock.
+func (lm *LockManager) GetPolicyShared(storage logical.Storage, name string) (*Policy, *sync.RWMutex, error) {
+ p, lock, _, err := lm.getPolicyCommon(PolicyRequest{
+ Storage: storage,
+ Name: name,
+ }, shared)
+	if err != errNeedExclusiveLock {
+		return p, lock, err
+	}
+
+	// Try again while asking for an exclusive lock
+ p, lock, _, err = lm.getPolicyCommon(PolicyRequest{
+ Storage: storage,
+ Name: name,
+ }, exclusive)
+ if err != nil || p == nil || lock == nil {
+ return p, lock, err
+ }
+
+ lock.Unlock()
+
+ p, lock, _, err = lm.getPolicyCommon(PolicyRequest{
+ Storage: storage,
+ Name: name,
+ }, shared)
+ return p, lock, err
+}
+
+// Get the policy with an exclusive lock
+func (lm *LockManager) GetPolicyExclusive(storage logical.Storage, name string) (*Policy, *sync.RWMutex, error) {
+ p, lock, _, err := lm.getPolicyCommon(PolicyRequest{
+ Storage: storage,
+ Name: name,
+ }, exclusive)
+ return p, lock, err
+}
+
+// Get the policy with a read lock; if it returns that an exclusive lock is
+// needed, retry. If successful, call one more time to get a read lock and
+// return the value.
+func (lm *LockManager) GetPolicyUpsert(req PolicyRequest) (*Policy, *sync.RWMutex, bool, error) {
+ req.Upsert = true
+
+ p, lock, _, err := lm.getPolicyCommon(req, shared)
+	if err != errNeedExclusiveLock {
+		return p, lock, false, err
+	}
+
+	// Try again while asking for an exclusive lock
+ p, lock, upserted, err := lm.getPolicyCommon(req, exclusive)
+ if err != nil || p == nil || lock == nil {
+ return p, lock, upserted, err
+ }
+ lock.Unlock()
+
+ req.Upsert = false
+ // Now get a shared lock for the return, but preserve the value of upserted
+ p, lock, _, err = lm.getPolicyCommon(req, shared)
+
+ return p, lock, upserted, err
+}
+
+// When the function returns, if err == nil and the returned lock is non-nil,
+// a lock is held on the policy. It is the caller's responsibility to unlock.
+func (lm *LockManager) getPolicyCommon(req PolicyRequest, lockType bool) (*Policy, *sync.RWMutex, bool, error) {
+ lock := lm.policyLock(req.Name, lockType)
+
+ var p *Policy
+ var err error
+
+ // Check if it's in our cache. If so, return right away.
+ if lm.CacheActive() {
+ lm.cacheMutex.RLock()
+ p = lm.cache[req.Name]
+ if p != nil {
+ lm.cacheMutex.RUnlock()
+ return p, lock, false, nil
+ }
+ lm.cacheMutex.RUnlock()
+ }
+
+ // Load it from storage
+ p, err = lm.getStoredPolicy(req.Storage, req.Name)
+ if err != nil {
+ lm.UnlockPolicy(lock, lockType)
+ return nil, nil, false, err
+ }
+
+ if p == nil {
+ // This is the only place we upsert a new policy, so if upsert is not
+ // specified, or the lock type is wrong, unlock before returning
+ if !req.Upsert {
+ lm.UnlockPolicy(lock, lockType)
+ return nil, nil, false, nil
+ }
+
+ if lockType != exclusive {
+ lm.UnlockPolicy(lock, lockType)
+ return nil, nil, false, errNeedExclusiveLock
+ }
+
+ switch req.KeyType {
+ case KeyType_AES256_GCM96:
+ if req.Convergent && !req.Derived {
+ return nil, nil, false, fmt.Errorf("convergent encryption requires derivation to be enabled")
+ }
+
+ case KeyType_ECDSA_P256:
+ if req.Derived || req.Convergent {
+ return nil, nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", KeyType_ECDSA_P256)
+ }
+
+ default:
+ return nil, nil, false, fmt.Errorf("unsupported key type %v", req.KeyType)
+ }
+
+ p = &Policy{
+ Name: req.Name,
+ Type: req.KeyType,
+ Derived: req.Derived,
+ Exportable: req.Exportable,
+ }
+ if req.Derived {
+ p.KDF = Kdf_hkdf_sha256
+ p.ConvergentEncryption = req.Convergent
+ p.ConvergentVersion = 2
+ }
+
+ err = p.Rotate(req.Storage)
+ if err != nil {
+ lm.UnlockPolicy(lock, lockType)
+ return nil, nil, false, err
+ }
+
+ if lm.CacheActive() {
+ // Since we didn't have the policy in the cache, if there was no
+ // error, write the value in.
+ lm.cacheMutex.Lock()
+ defer lm.cacheMutex.Unlock()
+			// Make sure a policy didn't appear in the cache while we were
+			// working. If one did, it was stored after a successful create,
+			// so prefer it and return it.
+ exp := lm.cache[req.Name]
+ if exp != nil {
+ return exp, lock, false, nil
+ }
+ if err == nil {
+ lm.cache[req.Name] = p
+ }
+ }
+
+ // We don't need to worry about upgrading since it will be a new policy
+ return p, lock, true, nil
+ }
+
+ if p.NeedsUpgrade() {
+ if lockType == shared {
+ lm.UnlockPolicy(lock, lockType)
+ return nil, nil, false, errNeedExclusiveLock
+ }
+
+ err = p.Upgrade(req.Storage)
+ if err != nil {
+ lm.UnlockPolicy(lock, lockType)
+ return nil, nil, false, err
+ }
+ }
+
+ if lm.CacheActive() {
+ // Since we didn't have the policy in the cache, if there was no
+ // error, write the value in.
+ lm.cacheMutex.Lock()
+ defer lm.cacheMutex.Unlock()
+		// Make sure a policy didn't appear in the cache while we were
+		// loading. If one did, it was stored after a successful load, so
+		// prefer it and return it.
+ exp := lm.cache[req.Name]
+ if exp != nil {
+ return exp, lock, false, nil
+ }
+ if err == nil {
+ lm.cache[req.Name] = p
+ }
+ }
+
+ return p, lock, false, nil
+}
+
+func (lm *LockManager) DeletePolicy(storage logical.Storage, name string) error {
+ lm.cacheMutex.Lock()
+ lock := lm.policyLock(name, exclusive)
+ defer lock.Unlock()
+ defer lm.cacheMutex.Unlock()
+
+ var p *Policy
+ var err error
+
+ if lm.CacheActive() {
+ p = lm.cache[name]
+ }
+ if p == nil {
+ p, err = lm.getStoredPolicy(storage, name)
+ if err != nil {
+ return err
+ }
+ if p == nil {
+ return fmt.Errorf("could not delete policy; not found")
+ }
+ }
+
+ if !p.DeletionAllowed {
+ return fmt.Errorf("deletion is not allowed for this policy")
+ }
+
+ err = storage.Delete("policy/" + name)
+ if err != nil {
+ return fmt.Errorf("error deleting policy %s: %s", name, err)
+ }
+
+ err = storage.Delete("archive/" + name)
+ if err != nil {
+ return fmt.Errorf("error deleting archive %s: %s", name, err)
+ }
+
+ if lm.CacheActive() {
+ delete(lm.cache, name)
+ }
+
+ return nil
+}
+
+func (lm *LockManager) getStoredPolicy(storage logical.Storage, name string) (*Policy, error) {
+ // Check if the policy already exists
+ raw, err := storage.Get("policy/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if raw == nil {
+ return nil, nil
+ }
+
+ // Decode the policy
+ policy := &Policy{
+ Keys: keyEntryMap{},
+ }
+ err = jsonutil.DecodeJSON(raw.Value, policy)
+ if err != nil {
+ return nil, err
+ }
+
+ return policy, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go b/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go
new file mode 100644
index 0000000..1faaca4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go
@@ -0,0 +1,821 @@
+package keysutil
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/base64"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "io"
+ "math/big"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/crypto/hkdf"
+
+ uuid "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/kdf"
+ "github.com/hashicorp/vault/logical"
+)
+
+// Careful with iota; don't put anything before it in this const block because
+// we need the default of zero to be the old-style KDF
+const (
+ Kdf_hmac_sha256_counter = iota // built-in helper
+ Kdf_hkdf_sha256 // golang.org/x/crypto/hkdf
+)
+
+// Or this one...we need the default of zero to be the original AES256-GCM96
+const (
+ KeyType_AES256_GCM96 = iota
+ KeyType_ECDSA_P256
+)
+
+const ErrTooOld = "ciphertext or signature version is disallowed by policy (too old)"
+
+type ecdsaSignature struct {
+ R, S *big.Int
+}
+
+type KeyType int
+
+func (kt KeyType) EncryptionSupported() bool {
+ switch kt {
+ case KeyType_AES256_GCM96:
+ return true
+ }
+ return false
+}
+
+func (kt KeyType) DecryptionSupported() bool {
+ switch kt {
+ case KeyType_AES256_GCM96:
+ return true
+ }
+ return false
+}
+
+func (kt KeyType) SigningSupported() bool {
+ switch kt {
+ case KeyType_ECDSA_P256:
+ return true
+ }
+ return false
+}
+
+func (kt KeyType) DerivationSupported() bool {
+ switch kt {
+ case KeyType_AES256_GCM96:
+ return true
+ }
+ return false
+}
+
+func (kt KeyType) String() string {
+ switch kt {
+ case KeyType_AES256_GCM96:
+ return "aes256-gcm96"
+ case KeyType_ECDSA_P256:
+ return "ecdsa-p256"
+ }
+
+ return "[unknown]"
+}
+
+// KeyEntry stores the key and metadata
+type KeyEntry struct {
+ AESKey []byte `json:"key"`
+ HMACKey []byte `json:"hmac_key"`
+ CreationTime int64 `json:"creation_time"`
+ EC_X *big.Int `json:"ec_x"`
+ EC_Y *big.Int `json:"ec_y"`
+ EC_D *big.Int `json:"ec_d"`
+ FormattedPublicKey string `json:"public_key"`
+}
+
+// keyEntryMap allows JSON marshal/unmarshal of an int-keyed map: JSON object
+// keys must be strings, so key versions are round-tripped through strconv
+type keyEntryMap map[int]KeyEntry
+
+// MarshalJSON implements JSON marshaling
+func (kem keyEntryMap) MarshalJSON() ([]byte, error) {
+ intermediate := map[string]KeyEntry{}
+ for k, v := range kem {
+ intermediate[strconv.Itoa(k)] = v
+ }
+ return json.Marshal(&intermediate)
+}
+
+// UnmarshalJSON implements JSON unmarshaling
+func (kem keyEntryMap) UnmarshalJSON(data []byte) error {
+ intermediate := map[string]KeyEntry{}
+ if err := jsonutil.DecodeJSON(data, &intermediate); err != nil {
+ return err
+ }
+ for k, v := range intermediate {
+ keyval, err := strconv.Atoi(k)
+ if err != nil {
+ return err
+ }
+ kem[keyval] = v
+ }
+
+ return nil
+}
+
+// Policy is the struct used to store key metadata and the key versions themselves
+type Policy struct {
+ Name string `json:"name"`
+ Key []byte `json:"key,omitempty"` //DEPRECATED
+ Keys keyEntryMap `json:"keys"`
+
+ // Derived keys MUST provide a context and the master underlying key is
+ // never used. If convergent encryption is true, the context will be used
+ // as the nonce as well.
+ Derived bool `json:"derived"`
+ KDF int `json:"kdf"`
+ ConvergentEncryption bool `json:"convergent_encryption"`
+
+ // Whether the key is exportable
+ Exportable bool `json:"exportable"`
+
+ // The minimum version of the key allowed to be used
+ // for decryption
+ MinDecryptionVersion int `json:"min_decryption_version"`
+
+ // The latest key version in this policy
+ LatestVersion int `json:"latest_version"`
+
+ // The latest key version in the archive. We never delete these, so this is
+ // a max.
+ ArchiveVersion int `json:"archive_version"`
+
+ // Whether the key is allowed to be deleted
+ DeletionAllowed bool `json:"deletion_allowed"`
+
+ // The version of the convergent nonce to use
+ ConvergentVersion int `json:"convergent_version"`
+
+ // The type of key
+ Type KeyType `json:"type"`
+}
+
+// archivedKeys stores old keys. This is used to keep the key loading time sane
+// when there are huge numbers of rotations.
+type archivedKeys struct {
+ Keys []KeyEntry `json:"keys"`
+}
+
+func (p *Policy) LoadArchive(storage logical.Storage) (*archivedKeys, error) {
+ archive := &archivedKeys{}
+
+ raw, err := storage.Get("archive/" + p.Name)
+ if err != nil {
+ return nil, err
+ }
+ if raw == nil {
+ archive.Keys = make([]KeyEntry, 0)
+ return archive, nil
+ }
+
+ if err := jsonutil.DecodeJSON(raw.Value, archive); err != nil {
+ return nil, err
+ }
+
+ return archive, nil
+}
+
+func (p *Policy) storeArchive(archive *archivedKeys, storage logical.Storage) error {
+ // Encode the policy
+ buf, err := json.Marshal(archive)
+ if err != nil {
+ return err
+ }
+
+ // Write the policy into storage
+ err = storage.Put(&logical.StorageEntry{
+ Key: "archive/" + p.Name,
+ Value: buf,
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// handleArchiving manages the movement of keys to and from the policy archive.
+// This should *ONLY* be called from Persist() since it assumes that the policy
+// will be persisted afterwards.
+func (p *Policy) handleArchiving(storage logical.Storage) error {
+ // We need to move keys that are no longer accessible to archivedKeys, and keys
+ // that now need to be accessible back here.
+ //
+ // For safety, because there isn't really a good reason to, we never delete
+ // keys from the archive even when we move them back.
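+	//
+	// As a worked example (illustrative numbers): with LatestVersion=10 and
+	// MinDecryptionVersion=7, p.Keys holds versions 7 through 10, while the
+	// archive holds indexes 0 through 10 (index 0 is a blank placeholder so
+	// that slice indexes line up with key versions).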
+
+ // Check if we have the latest minimum version in the current set of keys
+ _, keysContainsMinimum := p.Keys[p.MinDecryptionVersion]
+
+ // Sanity checks
+ switch {
+ case p.MinDecryptionVersion < 1:
+ return fmt.Errorf("minimum decryption version of %d is less than 1", p.MinDecryptionVersion)
+ case p.LatestVersion < 1:
+ return fmt.Errorf("latest version of %d is less than 1", p.LatestVersion)
+ case !keysContainsMinimum && p.ArchiveVersion != p.LatestVersion:
+ return fmt.Errorf("need to move keys from archive but archive version not up-to-date")
+ case p.ArchiveVersion > p.LatestVersion:
+ return fmt.Errorf("archive version of %d is greater than the latest version %d",
+ p.ArchiveVersion, p.LatestVersion)
+ case p.MinDecryptionVersion > p.LatestVersion:
+ return fmt.Errorf("minimum decryption version of %d is greater than the latest version %d",
+ p.MinDecryptionVersion, p.LatestVersion)
+ }
+
+ archive, err := p.LoadArchive(storage)
+ if err != nil {
+ return err
+ }
+
+ if !keysContainsMinimum {
+ // Need to move keys *from* archive
+
+ for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ {
+ p.Keys[i] = archive.Keys[i]
+ }
+
+ return nil
+ }
+
+ // Need to move keys *to* archive
+
+ // We need a size that is equivalent to the latest version (number of keys)
+ // but adding one since slice numbering starts at 0 and we're indexing by
+ // key version
+ if len(archive.Keys) < p.LatestVersion+1 {
+ // Increase the size of the archive slice
+ newKeys := make([]KeyEntry, p.LatestVersion+1)
+ copy(newKeys, archive.Keys)
+ archive.Keys = newKeys
+ }
+
+ // We are storing all keys in the archive, so we ensure that it is up to
+ // date up to p.LatestVersion
+ for i := p.ArchiveVersion + 1; i <= p.LatestVersion; i++ {
+ archive.Keys[i] = p.Keys[i]
+ p.ArchiveVersion = i
+ }
+
+ err = p.storeArchive(archive, storage)
+ if err != nil {
+ return err
+ }
+
+ // Perform deletion afterwards so that if there is an error saving we
+ // haven't messed with the current policy
+ for i := p.LatestVersion - len(p.Keys) + 1; i < p.MinDecryptionVersion; i++ {
+ delete(p.Keys, i)
+ }
+
+ return nil
+}
+
+func (p *Policy) Persist(storage logical.Storage) error {
+ err := p.handleArchiving(storage)
+ if err != nil {
+ return err
+ }
+
+ // Encode the policy
+ buf, err := p.Serialize()
+ if err != nil {
+ return err
+ }
+
+ // Write the policy into storage
+ err = storage.Put(&logical.StorageEntry{
+ Key: "policy/" + p.Name,
+ Value: buf,
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (p *Policy) Serialize() ([]byte, error) {
+ return json.Marshal(p)
+}
+
+func (p *Policy) NeedsUpgrade() bool {
+ // Ensure we've moved from Key -> Keys
+	if len(p.Key) > 0 {
+ return true
+ }
+
+ // With archiving, past assumptions about the length of the keys map are no
+ // longer valid
+ if p.LatestVersion == 0 && len(p.Keys) != 0 {
+ return true
+ }
+
+ // We disallow setting the version to 0, since they start at 1 since moving
+ // to rotate-able keys, so update if it's set to 0
+ if p.MinDecryptionVersion == 0 {
+ return true
+ }
+
+ // On first load after an upgrade, copy keys to the archive
+ if p.ArchiveVersion == 0 {
+ return true
+ }
+
+ // Need to write the version
+ if p.ConvergentEncryption && p.ConvergentVersion == 0 {
+ return true
+ }
+
+	if len(p.Keys[p.LatestVersion].HMACKey) == 0 {
+ return true
+ }
+
+ return false
+}
+
+func (p *Policy) Upgrade(storage logical.Storage) error {
+ persistNeeded := false
+ // Ensure we've moved from Key -> Keys
+	if len(p.Key) > 0 {
+ p.MigrateKeyToKeysMap()
+ persistNeeded = true
+ }
+
+ // With archiving, past assumptions about the length of the keys map are no
+ // longer valid
+ if p.LatestVersion == 0 && len(p.Keys) != 0 {
+ p.LatestVersion = len(p.Keys)
+ persistNeeded = true
+ }
+
+ // We disallow setting the version to 0, since they start at 1 since moving
+ // to rotate-able keys, so update if it's set to 0
+ if p.MinDecryptionVersion == 0 {
+ p.MinDecryptionVersion = 1
+ persistNeeded = true
+ }
+
+ // On first load after an upgrade, copy keys to the archive
+ if p.ArchiveVersion == 0 {
+ persistNeeded = true
+ }
+
+ if p.ConvergentEncryption && p.ConvergentVersion == 0 {
+ p.ConvergentVersion = 1
+ persistNeeded = true
+ }
+
+	if len(p.Keys[p.LatestVersion].HMACKey) == 0 {
+ entry := p.Keys[p.LatestVersion]
+ hmacKey, err := uuid.GenerateRandomBytes(32)
+ if err != nil {
+ return err
+ }
+ entry.HMACKey = hmacKey
+ p.Keys[p.LatestVersion] = entry
+ persistNeeded = true
+ }
+
+ if persistNeeded {
+ err := p.Persist(storage)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// DeriveKey is used to derive the encryption key that should be used depending
+// on the policy. If derivation is disabled the raw key is used and no context
+// is required, otherwise the KDF mode is used with the context to derive the
+// proper key.
+func (p *Policy) DeriveKey(context []byte, ver int) ([]byte, error) {
+ if !p.Type.DerivationSupported() {
+ return nil, errutil.UserError{Err: fmt.Sprintf("derivation not supported for key type %v", p.Type)}
+ }
+
+ if p.Keys == nil || p.LatestVersion == 0 {
+ return nil, errutil.InternalError{Err: "unable to access the key; no key versions found"}
+ }
+
+ if ver <= 0 || ver > p.LatestVersion {
+ return nil, errutil.UserError{Err: "invalid key version"}
+ }
+
+ // Fast-path non-derived keys
+ if !p.Derived {
+ return p.Keys[ver].AESKey, nil
+ }
+
+ // Ensure a context is provided
+ if len(context) == 0 {
+		return nil, errutil.UserError{Err: "missing 'context' for key derivation. The key was created using a derived key, which means additional, per-request information must be included in order to encrypt or decrypt information"}
+ }
+
+ switch p.KDF {
+ case Kdf_hmac_sha256_counter:
+ prf := kdf.HMACSHA256PRF
+ prfLen := kdf.HMACSHA256PRFLen
+ return kdf.CounterMode(prf, prfLen, p.Keys[ver].AESKey, context, 256)
+ case Kdf_hkdf_sha256:
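+		// HKDF-SHA256 with no salt and the caller's context as the info
+		// parameter; read exactly 32 bytes to form an AES-256 key.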
+ reader := hkdf.New(sha256.New, p.Keys[ver].AESKey, nil, context)
+ derBytes := bytes.NewBuffer(nil)
+ derBytes.Grow(32)
+ limReader := &io.LimitedReader{
+ R: reader,
+ N: 32,
+ }
+ n, err := derBytes.ReadFrom(limReader)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("error reading returned derived bytes: %v", err)}
+ }
+ if n != 32 {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("unable to read enough derived bytes, needed 32, got %d", n)}
+ }
+ return derBytes.Bytes(), nil
+ default:
+ return nil, errutil.InternalError{Err: "unsupported key derivation mode"}
+ }
+}
+
+func (p *Policy) Encrypt(context, nonce []byte, value string) (string, error) {
+ if !p.Type.EncryptionSupported() {
+ return "", errutil.UserError{Err: fmt.Sprintf("message encryption not supported for key type %v", p.Type)}
+ }
+
+ // Guard against a potentially invalid key type
+ switch p.Type {
+ case KeyType_AES256_GCM96:
+ default:
+ return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)}
+ }
+
+ // Decode the plaintext value
+ plaintext, err := base64.StdEncoding.DecodeString(value)
+ if err != nil {
+ return "", errutil.UserError{Err: "failed to base64-decode plaintext"}
+ }
+
+ // Derive the key that should be used
+ key, err := p.DeriveKey(context, p.LatestVersion)
+ if err != nil {
+ return "", err
+ }
+
+ // Guard against a potentially invalid key type
+ switch p.Type {
+ case KeyType_AES256_GCM96:
+ default:
+ return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)}
+ }
+
+ // Setup the cipher
+ aesCipher, err := aes.NewCipher(key)
+ if err != nil {
+ return "", errutil.InternalError{Err: err.Error()}
+ }
+
+ // Setup the GCM AEAD
+ gcm, err := cipher.NewGCM(aesCipher)
+ if err != nil {
+ return "", errutil.InternalError{Err: err.Error()}
+ }
+
+ if p.ConvergentEncryption {
+ switch p.ConvergentVersion {
+ case 1:
+ if len(nonce) != gcm.NonceSize() {
+ return "", errutil.UserError{Err: fmt.Sprintf("base64-decoded nonce must be %d bytes long when using convergent encryption with this key", gcm.NonceSize())}
+ }
+ default:
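+			// Convergent v2 and later: derive a deterministic nonce as the
+			// first NonceSize bytes of HMAC-SHA256(context, plaintext), so
+			// the same (context, plaintext) pair always produces the same
+			// ciphertext.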
+ nonceHmac := hmac.New(sha256.New, context)
+ nonceHmac.Write(plaintext)
+ nonceSum := nonceHmac.Sum(nil)
+ nonce = nonceSum[:gcm.NonceSize()]
+ }
+ } else {
+ // Compute random nonce
+ nonce, err = uuid.GenerateRandomBytes(gcm.NonceSize())
+ if err != nil {
+ return "", errutil.InternalError{Err: err.Error()}
+ }
+ }
+
+ // Encrypt and tag with GCM
+ out := gcm.Seal(nil, nonce, plaintext, nil)
+
+ // Place the encrypted data after the nonce
+ full := out
+ if !p.ConvergentEncryption || p.ConvergentVersion > 1 {
+ full = append(nonce, out...)
+ }
+
+ // Convert to base64
+ encoded := base64.StdEncoding.EncodeToString(full)
+
+ // Prepend some information
+ encoded = "vault:v" + strconv.Itoa(p.LatestVersion) + ":" + encoded
+
+ return encoded, nil
+}
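+
+// For reference, a ciphertext produced by Encrypt has the shape (illustrative
+// value, not a real encryption):
+//
+//	vault:v1:<base64(nonce || AES-GCM ciphertext and tag)>
+//
+// For convergent v1 keys the caller-supplied nonce is not prepended.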
+
+func (p *Policy) Decrypt(context, nonce []byte, value string) (string, error) {
+ if !p.Type.DecryptionSupported() {
+ return "", errutil.UserError{Err: fmt.Sprintf("message decryption not supported for key type %v", p.Type)}
+ }
+
+ // Verify the prefix
+ if !strings.HasPrefix(value, "vault:v") {
+ return "", errutil.UserError{Err: "invalid ciphertext: no prefix"}
+ }
+
+	if p.ConvergentEncryption && p.ConvergentVersion == 1 && len(nonce) == 0 {
+ return "", errutil.UserError{Err: "invalid convergent nonce supplied"}
+ }
+
+ splitVerCiphertext := strings.SplitN(strings.TrimPrefix(value, "vault:v"), ":", 2)
+ if len(splitVerCiphertext) != 2 {
+ return "", errutil.UserError{Err: "invalid ciphertext: wrong number of fields"}
+ }
+
+ ver, err := strconv.Atoi(splitVerCiphertext[0])
+ if err != nil {
+ return "", errutil.UserError{Err: "invalid ciphertext: version number could not be decoded"}
+ }
+
+ if ver == 0 {
+ // Compatibility mode with initial implementation, where keys start at
+ // zero
+ ver = 1
+ }
+
+ if ver > p.LatestVersion {
+ return "", errutil.UserError{Err: "invalid ciphertext: version is too new"}
+ }
+
+ if p.MinDecryptionVersion > 0 && ver < p.MinDecryptionVersion {
+ return "", errutil.UserError{Err: ErrTooOld}
+ }
+
+ // Derive the key that should be used
+ key, err := p.DeriveKey(context, ver)
+ if err != nil {
+ return "", err
+ }
+
+ // Guard against a potentially invalid key type
+ switch p.Type {
+ case KeyType_AES256_GCM96:
+ default:
+ return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)}
+ }
+
+ // Decode the base64
+ decoded, err := base64.StdEncoding.DecodeString(splitVerCiphertext[1])
+ if err != nil {
+ return "", errutil.UserError{Err: "invalid ciphertext: could not decode base64"}
+ }
+
+ // Setup the cipher
+ aesCipher, err := aes.NewCipher(key)
+ if err != nil {
+ return "", errutil.InternalError{Err: err.Error()}
+ }
+
+ // Setup the GCM AEAD
+ gcm, err := cipher.NewGCM(aesCipher)
+ if err != nil {
+ return "", errutil.InternalError{Err: err.Error()}
+ }
+
+ // Extract the nonce and ciphertext
+ var ciphertext []byte
+ if p.ConvergentEncryption && p.ConvergentVersion < 2 {
+ ciphertext = decoded
+ } else {
+ nonce = decoded[:gcm.NonceSize()]
+ ciphertext = decoded[gcm.NonceSize():]
+ }
+
+ // Verify and Decrypt
+ plain, err := gcm.Open(nil, nonce, ciphertext, nil)
+ if err != nil {
+ return "", errutil.UserError{Err: "invalid ciphertext: unable to decrypt"}
+ }
+
+ return base64.StdEncoding.EncodeToString(plain), nil
+}
+
+func (p *Policy) HMACKey(version int) ([]byte, error) {
+ if version < p.MinDecryptionVersion {
+ return nil, fmt.Errorf("key version disallowed by policy (minimum is %d)", p.MinDecryptionVersion)
+ }
+
+ if version > p.LatestVersion {
+ return nil, fmt.Errorf("key version does not exist; latest key version is %d", p.LatestVersion)
+ }
+
+ if p.Keys[version].HMACKey == nil {
+ return nil, fmt.Errorf("no HMAC key exists for that key version")
+ }
+
+ return p.Keys[version].HMACKey, nil
+}
+
+func (p *Policy) Sign(hashedInput []byte) (string, error) {
+ if !p.Type.SigningSupported() {
+ return "", fmt.Errorf("message signing not supported for key type %v", p.Type)
+ }
+
+ var sig []byte
+ switch p.Type {
+ case KeyType_ECDSA_P256:
+ keyParams := p.Keys[p.LatestVersion]
+ key := &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: elliptic.P256(),
+ X: keyParams.EC_X,
+ Y: keyParams.EC_Y,
+ },
+ D: keyParams.EC_D,
+ }
+ r, s, err := ecdsa.Sign(rand.Reader, key, hashedInput)
+ if err != nil {
+ return "", err
+ }
+ marshaledSig, err := asn1.Marshal(ecdsaSignature{
+ R: r,
+ S: s,
+ })
+ if err != nil {
+ return "", err
+ }
+ sig = marshaledSig
+
+ default:
+ return "", fmt.Errorf("unsupported key type %v", p.Type)
+ }
+
+ // Convert to base64
+ encoded := base64.StdEncoding.EncodeToString(sig)
+
+ // Prepend some information
+ encoded = "vault:v" + strconv.Itoa(p.LatestVersion) + ":" + encoded
+
+ return encoded, nil
+}
+
+func (p *Policy) VerifySignature(hashedInput []byte, sig string) (bool, error) {
+ if !p.Type.SigningSupported() {
+ return false, errutil.UserError{Err: fmt.Sprintf("message verification not supported for key type %v", p.Type)}
+ }
+
+ // Verify the prefix
+ if !strings.HasPrefix(sig, "vault:v") {
+ return false, errutil.UserError{Err: "invalid signature: no prefix"}
+ }
+
+ splitVerSig := strings.SplitN(strings.TrimPrefix(sig, "vault:v"), ":", 2)
+ if len(splitVerSig) != 2 {
+ return false, errutil.UserError{Err: "invalid signature: wrong number of fields"}
+ }
+
+ ver, err := strconv.Atoi(splitVerSig[0])
+ if err != nil {
+ return false, errutil.UserError{Err: "invalid signature: version number could not be decoded"}
+ }
+
+ if ver > p.LatestVersion {
+ return false, errutil.UserError{Err: "invalid signature: version is too new"}
+ }
+
+ if p.MinDecryptionVersion > 0 && ver < p.MinDecryptionVersion {
+ return false, errutil.UserError{Err: ErrTooOld}
+ }
+
+ switch p.Type {
+ case KeyType_ECDSA_P256:
+ asn1Sig, err := base64.StdEncoding.DecodeString(splitVerSig[1])
+ if err != nil {
+ return false, errutil.UserError{Err: "invalid base64 signature value"}
+ }
+
+ var ecdsaSig ecdsaSignature
+ rest, err := asn1.Unmarshal(asn1Sig, &ecdsaSig)
+ if err != nil {
+ return false, errutil.UserError{Err: "supplied signature is invalid"}
+ }
+		if len(rest) != 0 {
+ return false, errutil.UserError{Err: "supplied signature contains extra data"}
+ }
+
+ keyParams := p.Keys[ver]
+ key := &ecdsa.PublicKey{
+ Curve: elliptic.P256(),
+ X: keyParams.EC_X,
+ Y: keyParams.EC_Y,
+ }
+
+ return ecdsa.Verify(key, hashedInput, ecdsaSig.R, ecdsaSig.S), nil
+ default:
+ return false, errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)}
+ }
+}
+
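+// Rotate generates a new key version (including a fresh HMAC key), makes it
+// the latest version, and persists the policy.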
+func (p *Policy) Rotate(storage logical.Storage) error {
+ if p.Keys == nil {
+ // This is an initial key rotation when generating a new policy. We
+ // don't need to call migrate here because if we've called getPolicy to
+ // get the policy in the first place it will have been run.
+ p.Keys = keyEntryMap{}
+ }
+
+	p.LatestVersion++
+ entry := KeyEntry{
+ CreationTime: time.Now().Unix(),
+ }
+
+ hmacKey, err := uuid.GenerateRandomBytes(32)
+ if err != nil {
+ return err
+ }
+ entry.HMACKey = hmacKey
+
+ switch p.Type {
+ case KeyType_AES256_GCM96:
+ // Generate a 256bit key
+ newKey, err := uuid.GenerateRandomBytes(32)
+ if err != nil {
+ return err
+ }
+ entry.AESKey = newKey
+
+ case KeyType_ECDSA_P256:
+ privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ return err
+ }
+ entry.EC_D = privKey.D
+ entry.EC_X = privKey.X
+ entry.EC_Y = privKey.Y
+ derBytes, err := x509.MarshalPKIXPublicKey(privKey.Public())
+ if err != nil {
+ return fmt.Errorf("error marshaling public key: %s", err)
+ }
+ pemBlock := &pem.Block{
+ Type: "PUBLIC KEY",
+ Bytes: derBytes,
+ }
+ pemBytes := pem.EncodeToMemory(pemBlock)
+	if len(pemBytes) == 0 {
+ return fmt.Errorf("error PEM-encoding public key")
+ }
+ entry.FormattedPublicKey = string(pemBytes)
+ }
+
+ p.Keys[p.LatestVersion] = entry
+
+ // This ensures that with new key creations min decryption version is set
+ // to 1 rather than the int default of 0, since keys start at 1 (either
+ // fresh or after migration to the key map)
+ if p.MinDecryptionVersion == 0 {
+ p.MinDecryptionVersion = 1
+ }
+
+ return p.Persist(storage)
+}
+
+func (p *Policy) MigrateKeyToKeysMap() {
+ p.Keys = keyEntryMap{
+ 1: KeyEntry{
+ AESKey: p.Key,
+ CreationTime: time.Now().Unix(),
+ },
+ }
+ p.Key = nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go b/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go
new file mode 100644
index 0000000..600238b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go
@@ -0,0 +1,343 @@
+package keysutil
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+var (
+ keysArchive []KeyEntry
+)
+
+func resetKeysArchive() {
+ keysArchive = []KeyEntry{KeyEntry{}}
+}
+
+func Test_KeyUpgrade(t *testing.T) {
+ testKeyUpgradeCommon(t, NewLockManager(false))
+ testKeyUpgradeCommon(t, NewLockManager(true))
+}
+
+func testKeyUpgradeCommon(t *testing.T, lm *LockManager) {
+ storage := &logical.InmemStorage{}
+ p, lock, upserted, err := lm.GetPolicyUpsert(PolicyRequest{
+ Storage: storage,
+ KeyType: KeyType_AES256_GCM96,
+ Name: "test",
+ })
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p == nil {
+ t.Fatal("nil policy")
+ }
+ if !upserted {
+ t.Fatal("expected an upsert")
+ }
+
+ testBytes := make([]byte, len(p.Keys[1].AESKey))
+ copy(testBytes, p.Keys[1].AESKey)
+
+ p.Key = p.Keys[1].AESKey
+ p.Keys = nil
+ p.MigrateKeyToKeysMap()
+ if p.Key != nil {
+ t.Fatal("policy.Key is not nil")
+ }
+ if len(p.Keys) != 1 {
+ t.Fatal("policy.Keys is the wrong size")
+ }
+ if !reflect.DeepEqual(testBytes, p.Keys[1].AESKey) {
+ t.Fatal("key mismatch")
+ }
+}
+
+func Test_ArchivingUpgrade(t *testing.T) {
+ testArchivingUpgradeCommon(t, NewLockManager(false))
+ testArchivingUpgradeCommon(t, NewLockManager(true))
+}
+
+func testArchivingUpgradeCommon(t *testing.T, lm *LockManager) {
+ resetKeysArchive()
+
+ // First, we generate a policy and rotate it a number of times. Each time
+ // we'll ensure that we have the expected number of keys in the archive and
+ // the main keys object, which without changing the min version should be
+ // zero and latest, respectively
+
+ storage := &logical.InmemStorage{}
+ p, lock, _, err := lm.GetPolicyUpsert(PolicyRequest{
+ Storage: storage,
+ KeyType: KeyType_AES256_GCM96,
+ Name: "test",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p == nil || lock == nil {
+ t.Fatal("nil policy or lock")
+ }
+ lock.RUnlock()
+
+ // Store the initial key in the archive
+ keysArchive = append(keysArchive, p.Keys[1])
+ checkKeys(t, p, storage, "initial", 1, 1, 1)
+
+ for i := 2; i <= 10; i++ {
+ err = p.Rotate(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+ keysArchive = append(keysArchive, p.Keys[i])
+ checkKeys(t, p, storage, "rotate", i, i, i)
+ }
+
+ // Now, wipe the archive and set the archive version to zero
+ err = storage.Delete("archive/test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ p.ArchiveVersion = 0
+
+ // Store it, but without calling persist, so we don't trigger
+ // handleArchiving()
+ buf, err := p.Serialize()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Write the policy into storage
+ err = storage.Put(&logical.StorageEntry{
+ Key: "policy/" + p.Name,
+ Value: buf,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // If we're caching, expire from the cache since we modified it
+ // under-the-hood
+ if lm.CacheActive() {
+ delete(lm.cache, "test")
+ }
+
+ // Now get the policy again; the upgrade should happen automatically
+ p, lock, err = lm.GetPolicyShared(storage, "test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p == nil || lock == nil {
+ t.Fatal("nil policy or lock")
+ }
+ lock.RUnlock()
+
+ checkKeys(t, p, storage, "upgrade", 10, 10, 10)
+
+ // Let's check some deletion logic while we're at it
+
+ // The policy should be in there
+ if lm.CacheActive() && lm.cache["test"] == nil {
+ t.Fatal("nil policy in cache")
+ }
+
+ // First we'll do this wrong, by not setting the deletion flag
+ err = lm.DeletePolicy(storage, "test")
+ if err == nil {
+ t.Fatal("got nil error, but should not have been able to delete since we didn't set the deletion flag on the policy")
+ }
+
+ // The policy should still be in there
+ if lm.CacheActive() && lm.cache["test"] == nil {
+ t.Fatal("nil policy in cache")
+ }
+
+ p, lock, err = lm.GetPolicyShared(storage, "test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p == nil || lock == nil {
+ t.Fatal("policy or lock nil after bad delete")
+ }
+ lock.RUnlock()
+
+ // Now do it properly
+ p.DeletionAllowed = true
+ err = p.Persist(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = lm.DeletePolicy(storage, "test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // The policy should *not* be in there
+ if lm.CacheActive() && lm.cache["test"] != nil {
+ t.Fatal("non-nil policy in cache")
+ }
+
+ p, lock, err = lm.GetPolicyShared(storage, "test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p != nil || lock != nil {
+ t.Fatal("policy or lock not nil after delete")
+ }
+}
+
+func Test_Archiving(t *testing.T) {
+ testArchivingCommon(t, NewLockManager(false))
+ testArchivingCommon(t, NewLockManager(true))
+}
+
+func testArchivingCommon(t *testing.T, lm *LockManager) {
+ resetKeysArchive()
+
+	// First, we generate a policy and rotate it a number of times. Each time
+	// we'll ensure that we have the expected number of keys in the archive and
+ // the main keys object, which without changing the min version should be
+ // zero and latest, respectively
+
+ storage := &logical.InmemStorage{}
+ p, lock, _, err := lm.GetPolicyUpsert(PolicyRequest{
+ Storage: storage,
+ KeyType: KeyType_AES256_GCM96,
+ Name: "test",
+ })
+ if lock != nil {
+ defer lock.RUnlock()
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p == nil {
+ t.Fatal("nil policy")
+ }
+
+ // Store the initial key in the archive
+ keysArchive = append(keysArchive, p.Keys[1])
+ checkKeys(t, p, storage, "initial", 1, 1, 1)
+
+ for i := 2; i <= 10; i++ {
+ err = p.Rotate(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+ keysArchive = append(keysArchive, p.Keys[i])
+ checkKeys(t, p, storage, "rotate", i, i, i)
+ }
+
+ // Move the min decryption version up
+ for i := 1; i <= 10; i++ {
+ p.MinDecryptionVersion = i
+
+ err = p.Persist(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // We expect to find:
+ // * The keys in archive are the same as the latest version
+ // * The latest version is constant
+ // * The number of keys in the policy itself is from the min
+ // decryption version up to the latest version, so for e.g. 7 and
+ // 10, you'd need 7, 8, 9, and 10 -- IOW, latest version - min
+ // decryption version plus 1 (the min decryption version key
+ // itself)
+ checkKeys(t, p, storage, "minadd", 10, 10, p.LatestVersion-p.MinDecryptionVersion+1)
+ }
+
+ // Move the min decryption version down
+ for i := 10; i >= 1; i-- {
+ p.MinDecryptionVersion = i
+
+ err = p.Persist(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // We expect to find:
+ // * The keys in archive are never removed so same as the latest version
+ // * The latest version is constant
+ // * The number of keys in the policy itself is from the min
+ // decryption version up to the latest version, so for e.g. 7 and
+ // 10, you'd need 7, 8, 9, and 10 -- IOW, latest version - min
+ // decryption version plus 1 (the min decryption version key
+ // itself)
+ checkKeys(t, p, storage, "minsub", 10, 10, p.LatestVersion-p.MinDecryptionVersion+1)
+ }
+}
+
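+// checkKeys asserts, after the named action, the expected archive length,
+// latest key version, and policy key-map size, and cross-checks every key
+// against the test-global keysArchive.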
+func checkKeys(t *testing.T,
+ p *Policy,
+ storage logical.Storage,
+ action string,
+ archiveVer, latestVer, keysSize int) {
+
+ // Sanity check
+ if len(keysArchive) != latestVer+1 {
+ t.Fatalf("latest expected key version is %d, expected test keys archive size is %d, "+
+ "but keys archive is of size %d", latestVer, latestVer+1, len(keysArchive))
+ }
+
+ archive, err := p.LoadArchive(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ badArchiveVer := false
+ if archiveVer == 0 {
+ if len(archive.Keys) != 0 || p.ArchiveVersion != 0 {
+ badArchiveVer = true
+ }
+ } else {
+ // We need to subtract one because we have the indexes match key
+ // versions, which start at 1. So for an archive version of 1, we
+ // actually have two entries -- a blank 0 entry, and the key at spot 1
+ if archiveVer != len(archive.Keys)-1 || archiveVer != p.ArchiveVersion {
+ badArchiveVer = true
+ }
+ }
+ if badArchiveVer {
+ t.Fatalf(
+ "expected archive version %d, found length of archive keys %d and policy archive version %d",
+ archiveVer, len(archive.Keys), p.ArchiveVersion,
+ )
+ }
+
+ if latestVer != p.LatestVersion {
+ t.Fatalf(
+ "expected latest version %d, found %d",
+ latestVer, p.LatestVersion,
+ )
+ }
+
+ if keysSize != len(p.Keys) {
+ t.Fatalf(
+ "expected keys size %d, found %d, action is %s, policy is \n%#v\n",
+ keysSize, len(p.Keys), action, p,
+ )
+ }
+
+ for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ {
+ if _, ok := p.Keys[i]; !ok {
+ t.Fatalf(
+ "expected key %d, did not find it in policy keys", i,
+ )
+ }
+ }
+
+ for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ {
+ if !reflect.DeepEqual(p.Keys[i], keysArchive[i]) {
+ t.Fatalf("key %d not equivalent between policy keys and test keys archive", i)
+ }
+ }
+
+ for i := 1; i < len(archive.Keys); i++ {
+ if !reflect.DeepEqual(archive.Keys[i].AESKey, keysArchive[i].AESKey) {
+ t.Fatalf("key %d not equivalent between policy archive and test keys archive", i)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go
new file mode 100644
index 0000000..7ecf754
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go
@@ -0,0 +1,128 @@
+package kvbuilder
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/mitchellh/mapstructure"
+)
+
+// Builder is a struct to build a key/value mapping based on a list
+// of "k=v" pairs, where the value might come from stdin, a file, etc.
+type Builder struct {
+ Stdin io.Reader
+
+ result map[string]interface{}
+ stdin bool
+}
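+
+// A minimal usage sketch (illustrative keys and file name):
+//
+//	var b Builder
+//	if err := b.Add("user=alice", "config=@config.json"); err != nil {
+//		// handle the error
+//	}
+//	m := b.Map() // {"user": "alice", "config": <contents of config.json>}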
+
+// Map returns the built map.
+func (b *Builder) Map() map[string]interface{} {
+ return b.result
+}
+
+// Add adds to the mapping with the given args.
+func (b *Builder) Add(args ...string) error {
+ for _, a := range args {
+ if err := b.add(a); err != nil {
+ return fmt.Errorf("Invalid key/value pair '%s': %s", a, err)
+ }
+ }
+
+ return nil
+}
+
+func (b *Builder) add(raw string) error {
+ // Regardless of validity, make sure we make our result
+ if b.result == nil {
+ b.result = make(map[string]interface{})
+ }
+
+ // Empty strings are fine, just ignored
+ if raw == "" {
+ return nil
+ }
+
+ // If the arg is exactly "-", then we need to read from stdin
+ // and merge the results into the resulting structure.
+ if raw == "-" {
+ if b.Stdin == nil {
+ return fmt.Errorf("stdin is not supported")
+ }
+ if b.stdin {
+ return fmt.Errorf("stdin already consumed")
+ }
+
+ b.stdin = true
+ return b.addReader(b.Stdin)
+ }
+
+ // If the arg begins with "@" then we need to read a file directly
+ if raw[0] == '@' {
+ f, err := os.Open(raw[1:])
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ return b.addReader(f)
+ }
+
+ // Split into key/value
+ parts := strings.SplitN(raw, "=", 2)
+ if len(parts) != 2 {
+ return fmt.Errorf("format must be key=value")
+ }
+ key, value := parts[0], parts[1]
+
+ if len(value) > 0 {
+ if value[0] == '@' {
+ contents, err := ioutil.ReadFile(value[1:])
+ if err != nil {
+ return fmt.Errorf("error reading file: %s", err)
+ }
+
+ value = string(contents)
+		} else if len(value) > 1 && value[0] == '\\' && value[1] == '@' {
+ value = value[1:]
+ } else if value == "-" {
+ if b.Stdin == nil {
+ return fmt.Errorf("stdin is not supported")
+ }
+ if b.stdin {
+ return fmt.Errorf("stdin already consumed")
+ }
+ b.stdin = true
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, b.Stdin); err != nil {
+ return err
+ }
+
+ value = buf.String()
+ }
+ }
+
+ // Repeated keys will be converted into a slice
+ if existingValue, ok := b.result[key]; ok {
+ var sliceValue []interface{}
+ if err := mapstructure.WeakDecode(existingValue, &sliceValue); err != nil {
+ return err
+ }
+ sliceValue = append(sliceValue, value)
+ b.result[key] = sliceValue
+ return nil
+ }
+
+ b.result[key] = value
+ return nil
+}
+
+func (b *Builder) addReader(r io.Reader) error {
+ return jsonutil.DecodeJSONFromReader(r, &b.result)
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go
new file mode 100644
index 0000000..9b0cffb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go
@@ -0,0 +1,120 @@
+package kvbuilder
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+)
+
+func TestBuilder_basic(t *testing.T) {
+ var b Builder
+ err := b.Add("foo=bar", "bar=baz", "baz=")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := map[string]interface{}{
+ "foo": "bar",
+ "bar": "baz",
+ "baz": "",
+ }
+ actual := b.Map()
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
+
+func TestBuilder_escapedAt(t *testing.T) {
+ var b Builder
+ err := b.Add("foo=bar", "bar=\\@baz")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := map[string]interface{}{
+ "foo": "bar",
+ "bar": "@baz",
+ }
+ actual := b.Map()
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
+
+func TestBuilder_stdin(t *testing.T) {
+ var b Builder
+ b.Stdin = bytes.NewBufferString("baz")
+ err := b.Add("foo=bar", "bar=-")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := map[string]interface{}{
+ "foo": "bar",
+ "bar": "baz",
+ }
+ actual := b.Map()
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
+
+func TestBuilder_stdinMap(t *testing.T) {
+ var b Builder
+ b.Stdin = bytes.NewBufferString(`{"foo": "bar"}`)
+ err := b.Add("-", "bar=baz")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := map[string]interface{}{
+ "foo": "bar",
+ "bar": "baz",
+ }
+ actual := b.Map()
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
+
+func TestBuilder_stdinTwice(t *testing.T) {
+ var b Builder
+ b.Stdin = bytes.NewBufferString(`{"foo": "bar"}`)
+ err := b.Add("-", "-")
+ if err == nil {
+ t.Fatal("should error")
+ }
+}
+
+func TestBuilder_sameKeyTwice(t *testing.T) {
+ var b Builder
+ err := b.Add("foo=bar", "foo=baz")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := map[string]interface{}{
+ "foo": []interface{}{"bar", "baz"},
+ }
+ actual := b.Map()
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
+
+func TestBuilder_sameKeyMultipleTimes(t *testing.T) {
+ var b Builder
+ err := b.Add("foo=bar", "foo=baz", "foo=bay", "foo=bax", "bar=baz")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := map[string]interface{}{
+ "foo": []interface{}{"bar", "baz", "bay", "bax"},
+ "bar": "baz",
+ }
+ actual := b.Map()
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/locksutil/locks.go b/vendor/github.com/hashicorp/vault/helper/locksutil/locks.go
new file mode 100644
index 0000000..dcf1b4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/locksutil/locks.go
@@ -0,0 +1,32 @@
+package locksutil
+
+import (
+ "crypto/md5"
+ "sync"
+)
+
+const (
+ LockCount = 256
+)
+
+type LockEntry struct {
+ sync.RWMutex
+}
+
+func CreateLocks() []*LockEntry {
+ ret := make([]*LockEntry, LockCount)
+ for i := range ret {
+ ret[i] = new(LockEntry)
+ }
+ return ret
+}
+
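+// LockIndexForKey deterministically maps a key to one of the LockCount lock
+// slots using the first byte of the key's MD5 digest; MD5 is chosen here for
+// its distribution, not for any security property.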
+func LockIndexForKey(key string) uint8 {
+ hf := md5.New()
+ hf.Write([]byte(key))
+ return uint8(hf.Sum(nil)[0])
+}
+
+func LockForKey(locks []*LockEntry, key string) *LockEntry {
+ return locks[LockIndexForKey(key)]
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/locksutil/locks_test.go b/vendor/github.com/hashicorp/vault/helper/locksutil/locks_test.go
new file mode 100644
index 0000000..9916644
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/locksutil/locks_test.go
@@ -0,0 +1,10 @@
+package locksutil
+
+import "testing"
+
+func Test_CreateLocks(t *testing.T) {
+ locks := CreateLocks()
+ if len(locks) != 256 {
+ t.Fatalf("bad: len(locks): expected:256 actual:%d", len(locks))
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/logformat/vault.go b/vendor/github.com/hashicorp/vault/helper/logformat/vault.go
new file mode 100644
index 0000000..fa53a19
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/logformat/vault.go
@@ -0,0 +1,175 @@
+package logformat
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+)
+
+const (
+ styledefault = iota
+ stylejson
+)
+
+// NewVaultLogger creates a new logger with the specified level and a Vault
+// formatter
+func NewVaultLogger(level int) log.Logger {
+ logger := log.New("vault")
+ return setLevelFormatter(logger, level, createVaultFormatter())
+}
+
+// NewVaultLoggerWithWriter creates a new logger with the specified level and
+// writer and a Vault formatter
+func NewVaultLoggerWithWriter(w io.Writer, level int) log.Logger {
+ logger := log.NewLogger(w, "vault")
+ return setLevelFormatter(logger, level, createVaultFormatter())
+}
+
+// Sets the level and formatter on the log, which must be a DefaultLogger
+func setLevelFormatter(logger log.Logger, level int, formatter log.Formatter) log.Logger {
+ logger.(*log.DefaultLogger).SetLevel(level)
+ logger.(*log.DefaultLogger).SetFormatter(formatter)
+ return logger
+}
+
+// Creates a formatter, checking env vars for the style
+func createVaultFormatter() log.Formatter {
+ ret := &vaultFormatter{
+ Mutex: &sync.Mutex{},
+ }
+ logFormat := os.Getenv("VAULT_LOG_FORMAT")
+ if logFormat == "" {
+ logFormat = os.Getenv("LOGXI_FORMAT")
+ }
+ switch strings.ToLower(logFormat) {
+ case "json", "vault_json", "vault-json", "vaultjson":
+ ret.style = stylejson
+ default:
+ ret.style = styledefault
+ }
+ return ret
+}
+
+// Thread safe formatter
+type vaultFormatter struct {
+ *sync.Mutex
+ style int
+ module string
+}
+
+func (v *vaultFormatter) Format(writer io.Writer, level int, msg string, args []interface{}) {
+ currTime := time.Now()
+ v.Lock()
+ defer v.Unlock()
+ switch v.style {
+ case stylejson:
+ v.formatJSON(writer, currTime, level, msg, args)
+ default:
+ v.formatDefault(writer, currTime, level, msg, args)
+ }
+}
+
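+// formatDefault writes one human-readable line per entry, e.g. (illustrative):
+//
+//	2017/05/01 12:00:00.000000 [INFO ] mount enabled: path=secret/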
+func (v *vaultFormatter) formatDefault(writer io.Writer, currTime time.Time, level int, msg string, args []interface{}) {
+ // Write a trailing newline
+ defer writer.Write([]byte("\n"))
+
+ writer.Write([]byte(currTime.Local().Format("2006/01/02 15:04:05.000000")))
+
+ switch level {
+ case log.LevelCritical:
+ writer.Write([]byte(" [CRIT ] "))
+ case log.LevelError:
+ writer.Write([]byte(" [ERROR] "))
+ case log.LevelWarn:
+ writer.Write([]byte(" [WARN ] "))
+ case log.LevelInfo:
+ writer.Write([]byte(" [INFO ] "))
+ case log.LevelDebug:
+ writer.Write([]byte(" [DEBUG] "))
+ case log.LevelTrace:
+ writer.Write([]byte(" [TRACE] "))
+ default:
+ writer.Write([]byte(" [ALL ] "))
+ }
+
+ if v.module != "" {
+ writer.Write([]byte(fmt.Sprintf("(%s) ", v.module)))
+ }
+
+ writer.Write([]byte(msg))
+
+	if len(args) > 0 {
+ if len(args)%2 != 0 {
+ args = append(args, "[unknown!]")
+ }
+
+ writer.Write([]byte(":"))
+
+ for i := 0; i < len(args); i = i + 2 {
+ var quote string
+ switch args[i+1].(type) {
+ case string:
+ if strings.ContainsRune(args[i+1].(string), ' ') {
+ quote = `"`
+ }
+ }
+ writer.Write([]byte(fmt.Sprintf(" %s=%s%v%s", args[i], quote, args[i+1], quote)))
+ }
+ }
+}
+
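+// formatJSON emits one JSON object per entry, e.g. (illustrative):
+//
+//	{"@level":"info","@message":"mount enabled","@timestamp":"2017-05-01T12:00:00.000000Z","path":"secret/"}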
+func (v *vaultFormatter) formatJSON(writer io.Writer, currTime time.Time, level int, msg string, args []interface{}) {
+ vals := map[string]interface{}{
+ "@message": msg,
+ "@timestamp": currTime.Format("2006-01-02T15:04:05.000000Z07:00"),
+ }
+
+ var levelStr string
+ switch level {
+ case log.LevelCritical:
+ levelStr = "critical"
+ case log.LevelError:
+ levelStr = "error"
+ case log.LevelWarn:
+ levelStr = "warn"
+ case log.LevelInfo:
+ levelStr = "info"
+ case log.LevelDebug:
+ levelStr = "debug"
+ case log.LevelTrace:
+ levelStr = "trace"
+ default:
+ levelStr = "all"
+ }
+
+ vals["@level"] = levelStr
+
+ if v.module != "" {
+ vals["@module"] = v.module
+ }
+
+	if len(args) > 0 {
+ if len(args)%2 != 0 {
+ args = append(args, "[unknown!]")
+ }
+
+ for i := 0; i < len(args); i = i + 2 {
+ if _, ok := args[i].(string); !ok {
+ // As this is the logging function not much we can do here
+ // without injecting into logs...
+ continue
+ }
+ vals[args[i].(string)] = args[i+1]
+ }
+ }
+
+ enc := json.NewEncoder(writer)
+ enc.Encode(vals)
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/duo/duo.go b/vendor/github.com/hashicorp/vault/helper/mfa/duo/duo.go
new file mode 100644
index 0000000..db97074
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/mfa/duo/duo.go
@@ -0,0 +1,148 @@
+// Package duo provides a Duo MFA handler to authenticate users
+// with Duo. This handler is registered as the "duo" type in
+// mfa_config.
+package duo
+
+import (
+ "fmt"
+ "net/url"
+
+ "github.com/duosecurity/duo_api_golang/authapi"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// DuoPaths returns path functions to configure Duo.
+func DuoPaths() []*framework.Path {
+ return []*framework.Path{
+ pathDuoConfig(),
+ pathDuoAccess(),
+ }
+}
+
+// DuoRootPaths returns the paths that are used to configure Duo.
+func DuoRootPaths() []string {
+ return []string{
+ "duo/access",
+ "duo/config",
+ }
+}
+
+// DuoHandler interacts with the Duo Auth API to authenticate a user
+// login request. If successful, the original response from the login
+// backend is returned.
+func DuoHandler(req *logical.Request, d *framework.FieldData, resp *logical.Response) (
+ *logical.Response, error) {
+ duoConfig, err := GetDuoConfig(req)
+ if err != nil || duoConfig == nil {
+ return logical.ErrorResponse("Could not load Duo configuration"), nil
+ }
+
+ duoAuthClient, err := GetDuoAuthClient(req, duoConfig)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ username, ok := resp.Auth.Metadata["username"]
+ if !ok {
+ return logical.ErrorResponse("Could not read username for MFA"), nil
+ }
+
+	request := &duoAuthRequest{
+		successResp: resp,
+		username:    username,
+		method:      d.Get("method").(string),
+		passcode:    d.Get("passcode").(string),
+		ipAddr:      req.Connection.RemoteAddr,
+	}
+
+ return duoHandler(duoConfig, duoAuthClient, request)
+}
+
+type duoAuthRequest struct {
+ successResp *logical.Response
+ username string
+ method string
+ passcode string
+ ipAddr string
+}
+
+func duoHandler(duoConfig *DuoConfig, duoAuthClient AuthClient, request *duoAuthRequest) (
+ *logical.Response, error) {
+
+ duoUser := fmt.Sprintf(duoConfig.UsernameFormat, request.username)
+
+ preauth, err := duoAuthClient.Preauth(
+ authapi.PreauthUsername(duoUser),
+ authapi.PreauthIpAddr(request.ipAddr),
+ )
+
+ if err != nil || preauth == nil {
+ return logical.ErrorResponse("Could not call Duo preauth"), nil
+ }
+
+ if preauth.StatResult.Stat != "OK" {
+ errorMsg := "Could not look up Duo user information"
+ if preauth.StatResult.Message != nil {
+ errorMsg = errorMsg + ": " + *preauth.StatResult.Message
+ }
+ if preauth.StatResult.Message_Detail != nil {
+ errorMsg = errorMsg + " (" + *preauth.StatResult.Message_Detail + ")"
+ }
+ return logical.ErrorResponse(errorMsg), nil
+ }
+
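+	// Duo preauth results: "allow" bypasses the second factor, "deny"
+	// rejects outright, "enroll" means the user is not yet enrolled, and
+	// "auth" continues to factor selection below.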
+ switch preauth.Response.Result {
+ case "allow":
+ return request.successResp, err
+ case "deny":
+ return logical.ErrorResponse(preauth.Response.Status_Msg), nil
+ case "enroll":
+ return logical.ErrorResponse(fmt.Sprintf("%s (%s)",
+ preauth.Response.Status_Msg,
+ preauth.Response.Enroll_Portal_Url)), nil
+ case "auth":
+ break
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("Invalid Duo preauth response: %s",
+ preauth.Response.Result)), nil
+ }
+
+ options := []func(*url.Values){authapi.AuthUsername(duoUser)}
+ if request.method == "" {
+ request.method = "auto"
+ }
+ if request.method == "auto" || request.method == "push" {
+ if duoConfig.PushInfo != "" {
+ options = append(options, authapi.AuthPushinfo(duoConfig.PushInfo))
+ }
+ }
+ if request.passcode != "" {
+ request.method = "passcode"
+ options = append(options, authapi.AuthPasscode(request.passcode))
+ } else {
+ options = append(options, authapi.AuthDevice("auto"))
+ }
+
+ result, err := duoAuthClient.Auth(request.method, options...)
+
+ if err != nil || result == nil {
+ return logical.ErrorResponse("Could not call Duo auth"), nil
+ }
+
+ if result.StatResult.Stat != "OK" {
+ errorMsg := "Could not authenticate Duo user"
+ if result.StatResult.Message != nil {
+ errorMsg = errorMsg + ": " + *result.StatResult.Message
+ }
+ if result.StatResult.Message_Detail != nil {
+ errorMsg = errorMsg + " (" + *result.StatResult.Message_Detail + ")"
+ }
+ return logical.ErrorResponse(errorMsg), nil
+ }
+
+ if result.Response.Result != "allow" {
+ return logical.ErrorResponse(result.Response.Status_Msg), nil
+ }
+
+ return request.successResp, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/duo/duo_test.go b/vendor/github.com/hashicorp/vault/helper/mfa/duo/duo_test.go
new file mode 100644
index 0000000..fd31128
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/mfa/duo/duo_test.go
@@ -0,0 +1,123 @@
+package duo
+
+import (
+ "net/url"
+ "strings"
+ "testing"
+
+ "github.com/duosecurity/duo_api_golang/authapi"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+type MockClientData struct {
+ PreauthData *authapi.PreauthResult
+ PreauthError error
+ AuthData *authapi.AuthResult
+ AuthError error
+}
+
+type MockAuthClient struct {
+ MockData *MockClientData
+}
+
+func (c *MockAuthClient) Preauth(options ...func(*url.Values)) (*authapi.PreauthResult, error) {
+ return c.MockData.PreauthData, c.MockData.PreauthError
+}
+
+func (c *MockAuthClient) Auth(factor string, options ...func(*url.Values)) (*authapi.AuthResult, error) {
+ return c.MockData.AuthData, c.MockData.AuthError
+}
+
+func MockGetDuoAuthClient(data *MockClientData) func(*logical.Request, *DuoConfig) (AuthClient, error) {
+ return func(*logical.Request, *DuoConfig) (AuthClient, error) {
+ return getDuoAuthClient(data), nil
+ }
+}
+
+func getDuoAuthClient(data *MockClientData) AuthClient {
+ var c MockAuthClient
+ // set default response to be successful
+ preauthSuccessJSON := `
+ {
+ "Stat": "OK",
+ "Response": {
+ "Result": "auth",
+ "Status_Msg": "Needs authentication",
+ "Devices": []
+ }
+ }`
+ if data.PreauthData == nil {
+ data.PreauthData = &authapi.PreauthResult{}
+ jsonutil.DecodeJSON([]byte(preauthSuccessJSON), data.PreauthData)
+ }
+
+ authSuccessJSON := `
+ {
+ "Stat": "OK",
+ "Response": {
+ "Result": "allow"
+ }
+ }`
+ if data.AuthData == nil {
+ data.AuthData = &authapi.AuthResult{}
+ jsonutil.DecodeJSON([]byte(authSuccessJSON), data.AuthData)
+ }
+
+ c.MockData = data
+ return &c
+}
+
+func TestDuoHandlerSuccess(t *testing.T) {
+ successResp := &logical.Response{
+ Auth: &logical.Auth{},
+ }
+ duoConfig := &DuoConfig{
+ UsernameFormat: "%s",
+ }
+ duoAuthClient := getDuoAuthClient(&MockClientData{})
+ resp, err := duoHandler(duoConfig, duoAuthClient, &duoAuthRequest{
+ successResp: successResp,
+ username: "",
+ })
+ if err != nil {
+		t.Fatal(err)
+ }
+ if resp != successResp {
+ t.Fatalf("Testing Duo authentication gave incorrect response (expected success, got: %v)", resp)
+ }
+}
+
+func TestDuoHandlerReject(t *testing.T) {
+ AuthData := &authapi.AuthResult{}
+ authRejectJSON := `
+ {
+ "Stat": "OK",
+ "Response": {
+ "Result": "deny",
+ "Status_Msg": "Invalid auth"
+ }
+ }`
+ jsonutil.DecodeJSON([]byte(authRejectJSON), AuthData)
+ successResp := &logical.Response{
+ Auth: &logical.Auth{},
+ }
+ expectedError := AuthData.Response.Status_Msg
+ duoConfig := &DuoConfig{
+ UsernameFormat: "%s",
+ }
+ duoAuthClient := getDuoAuthClient(&MockClientData{
+ AuthData: AuthData,
+ })
+ resp, err := duoHandler(duoConfig, duoAuthClient, &duoAuthRequest{
+ successResp: successResp,
+ username: "user",
+ })
+ if err != nil {
+		t.Fatal(err)
+ }
+	errMsg, ok := resp.Data["error"].(string)
+	if !ok || !strings.Contains(errMsg, expectedError) {
+		t.Fatalf("Testing Duo authentication gave incorrect response (expected deny, got: %v)", errMsg)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/duo/path_duo_access.go b/vendor/github.com/hashicorp/vault/helper/mfa/duo/path_duo_access.go
new file mode 100644
index 0000000..d087b0f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/mfa/duo/path_duo_access.go
@@ -0,0 +1,109 @@
+package duo
+
+import (
+ "fmt"
+ "net/url"
+
+ "github.com/duosecurity/duo_api_golang"
+ "github.com/duosecurity/duo_api_golang/authapi"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+type AuthClient interface {
+ Preauth(options ...func(*url.Values)) (*authapi.PreauthResult, error)
+ Auth(factor string, options ...func(*url.Values)) (*authapi.AuthResult, error)
+}
+
+func pathDuoAccess() *framework.Path {
+ return &framework.Path{
+ Pattern: `duo/access`,
+ Fields: map[string]*framework.FieldSchema{
+ "skey": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Duo secret key",
+ },
+ "ikey": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Duo integration key",
+ },
+ "host": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Duo api host",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: pathDuoAccessWrite,
+ },
+
+ HelpSynopsis: pathDuoAccessHelpSyn,
+ HelpDescription: pathDuoAccessHelpDesc,
+ }
+}
+
+func GetDuoAuthClient(req *logical.Request, config *DuoConfig) (AuthClient, error) {
+ entry, err := req.Storage.Get("duo/access")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, fmt.Errorf(
+ "Duo access credentials haven't been configured. Please configure\n" +
+ "them at the 'duo/access' endpoint")
+ }
+ var access DuoAccess
+ if err := entry.DecodeJSON(&access); err != nil {
+ return nil, err
+ }
+
+ duoClient := duoapi.NewDuoApi(
+ access.IKey,
+ access.SKey,
+ access.Host,
+ config.UserAgent,
+ )
+ duoAuthClient := authapi.NewAuthApi(*duoClient)
+ check, err := duoAuthClient.Check()
+ if err != nil {
+ return nil, err
+ }
+ if check.StatResult.Stat != "OK" {
+ return nil, fmt.Errorf("Could not connect to Duo: %s (%s)", *check.StatResult.Message, *check.StatResult.Message_Detail)
+ }
+ return duoAuthClient, nil
+}
+
+func pathDuoAccessWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entry, err := logical.StorageEntryJSON("duo/access", DuoAccess{
+ SKey: d.Get("skey").(string),
+ IKey: d.Get("ikey").(string),
+ Host: d.Get("host").(string),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+type DuoAccess struct {
+ SKey string `json:"skey"`
+ IKey string `json:"ikey"`
+ Host string `json:"host"`
+}
+
+const pathDuoAccessHelpSyn = `
+Configure the access keys and host for Duo API connections.
+`
+
+const pathDuoAccessHelpDesc = `
+To authenticate users with Duo, the backend needs to know what host to connect to
+and must authenticate with an integration key and secret key. This endpoint is used
+to configure that information.
+`
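
The help text above lists what this endpoint needs. As a hedged sketch of how a client could supply it through the Vault API package (the auth/userpass mount point and all credential values are placeholders for illustration):

    package main

    import (
    	"log"

    	"github.com/hashicorp/vault/api"
    )

    func main() {
    	client, err := api.NewClient(api.DefaultConfig())
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Field names match the duo/access path schema; the mount point
    	// and values below are placeholders.
    	_, err = client.Logical().Write("auth/userpass/duo/access", map[string]interface{}{
    		"host": "api-XXXXXXXX.duosecurity.com",
    		"ikey": "<integration key>",
    		"skey": "<secret key>",
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
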
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/duo/path_duo_config.go b/vendor/github.com/hashicorp/vault/helper/mfa/duo/path_duo_config.go
new file mode 100644
index 0000000..88c5647
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/mfa/duo/path_duo_config.go
@@ -0,0 +1,112 @@
+package duo
+
+import (
+ "errors"
+ "strings"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathDuoConfig() *framework.Path {
+ return &framework.Path{
+ Pattern: `duo/config`,
+ Fields: map[string]*framework.FieldSchema{
+ "user_agent": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "User agent to connect to Duo (default \"\")",
+ },
+ "username_format": &framework.FieldSchema{
+ Type: framework.TypeString,
+				Description: "Format string that takes the auth backend username as its argument and produces the Duo username (default '%s')",
+ },
+ "push_info": &framework.FieldSchema{
+ Type: framework.TypeString,
+				Description: "A string of URL-encoded key/value pairs that provides additional context about the authentication attempt in the Duo Mobile app",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: pathDuoConfigWrite,
+ logical.ReadOperation: pathDuoConfigRead,
+ },
+
+ HelpSynopsis: pathDuoConfigHelpSyn,
+ HelpDescription: pathDuoConfigHelpDesc,
+ }
+}
+
+func GetDuoConfig(req *logical.Request) (*DuoConfig, error) {
+ var result DuoConfig
+	// all config parameters are optional, so the path need not exist
+ entry, err := req.Storage.Get("duo/config")
+ if err == nil && entry != nil {
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+ }
+ if result.UsernameFormat == "" {
+ result.UsernameFormat = "%s"
+ }
+ return &result, nil
+}
+
+func pathDuoConfigWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ username_format := d.Get("username_format").(string)
+ if username_format == "" {
+ username_format = "%s"
+ }
+ if !strings.Contains(username_format, "%s") {
+ return nil, errors.New("username_format must include username ('%s')")
+ }
+ entry, err := logical.StorageEntryJSON("duo/config", DuoConfig{
+ UsernameFormat: username_format,
+ UserAgent: d.Get("user_agent").(string),
+ PushInfo: d.Get("push_info").(string),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func pathDuoConfigRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ config, err := GetDuoConfig(req)
+ if err != nil {
+ return nil, err
+ }
+ if config == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "username_format": config.UsernameFormat,
+ "user_agent": config.UserAgent,
+ "push_info": config.PushInfo,
+ },
+ }, nil
+}
+
+type DuoConfig struct {
+ UsernameFormat string `json:"username_format"`
+ UserAgent string `json:"user_agent"`
+ PushInfo string `json:"push_info"`
+}
+
+const pathDuoConfigHelpSyn = `
+Configure Duo second factor behavior.
+`
+
+const pathDuoConfigHelpDesc = `
+This endpoint allows you to configure how the original auth backend username maps to
+the Duo username by providing a template format string.
+`
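
Analogously to the duo/access sketch, a hedged example of setting the optional mapping this file describes; the auth/userpass mount and the "%s@example.com" format are assumptions for illustration:

    package main

    import (
    	"log"

    	"github.com/hashicorp/vault/api"
    )

    func main() {
    	client, err := api.NewClient(api.DefaultConfig())
    	if err != nil {
    		log.Fatal(err)
    	}
    	// username_format maps a backend username into a Duo username;
    	// "%s@example.com" appends a hypothetical domain.
    	_, err = client.Logical().Write("auth/userpass/duo/config", map[string]interface{}{
    		"username_format": "%s@example.com",
    		"push_info":       "from=vault",
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
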
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/mfa.go b/vendor/github.com/hashicorp/vault/helper/mfa/mfa.go
new file mode 100644
index 0000000..9939a5f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/mfa/mfa.go
@@ -0,0 +1,86 @@
+// Package mfa provides wrappers to add multi-factor authentication
+// to any auth backend.
+//
+// To add MFA to a backend, replace its login path with the
+// paths returned by MFAPaths and add the additional root
+// paths returned by MFARootPaths. The backend provides
+// the username to the MFA wrapper in Auth.Metadata['username'].
+//
+// To add an additional MFA type, create a subpackage that
+// implements [Type]Paths, [Type]RootPaths, and [Type]Handler
+// functions and add them to MFAPaths, MFARootPaths, and
+// handlers respectively.
+package mfa
+
+import (
+ "github.com/hashicorp/vault/helper/mfa/duo"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// MFAPaths returns paths to wrap the original login path and configure MFA.
+// When adding MFA to a backend, these paths should be included instead of
+// the login path in Backend.Paths.
+func MFAPaths(originalBackend *framework.Backend, loginPath *framework.Path) []*framework.Path {
+ var b backend
+ b.Backend = originalBackend
+ return append(duo.DuoPaths(), pathMFAConfig(&b), wrapLoginPath(&b, loginPath))
+}
+
+// MFARootPaths returns path strings used to configure MFA. When adding MFA
+// to a backend, these paths should be included in
+// Backend.PathsSpecial.Root.
+func MFARootPaths() []string {
+ return append(duo.DuoRootPaths(), "mfa_config")
+}
+
+// HandlerFunc is the callback called to handle MFA for a login request.
+type HandlerFunc func(*logical.Request, *framework.FieldData, *logical.Response) (*logical.Response, error)
+
+// handlers maps each supported MFA type to its handler.
+var handlers = map[string]HandlerFunc{
+ "duo": duo.DuoHandler,
+}
+
+type backend struct {
+ *framework.Backend
+}
+
+func wrapLoginPath(b *backend, loginPath *framework.Path) *framework.Path {
+ loginPath.Fields["passcode"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+		Description: "One-time passcode (optional)",
+ }
+ loginPath.Fields["method"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Multi-factor auth method to use (optional)",
+ }
+ // wrap write callback to do MFA after auth
+ loginHandler := loginPath.Callbacks[logical.UpdateOperation]
+ loginPath.Callbacks[logical.UpdateOperation] = b.wrapLoginHandler(loginHandler)
+ return loginPath
+}
+
+func (b *backend) wrapLoginHandler(loginHandler framework.OperationFunc) framework.OperationFunc {
+ return func(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // login with original login function first
+ resp, err := loginHandler(req, d)
+		if err != nil || resp == nil || resp.Auth == nil {
+ return resp, err
+ }
+
+ // check if multi-factor enabled
+ mfa_config, err := b.MFAConfig(req)
+ if err != nil || mfa_config == nil {
+ return resp, nil
+ }
+
+		// perform multi-factor authentication if type supported
+		if handler, ok := handlers[mfa_config.Type]; ok {
+			return handler(req, d, resp)
+		}
+		return resp, nil
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/mfa_test.go b/vendor/github.com/hashicorp/vault/helper/mfa/mfa_test.go
new file mode 100644
index 0000000..f618971
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/mfa/mfa_test.go
@@ -0,0 +1,129 @@
+package mfa
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+)
+
+// MakeTestBackend creates a simple MFA enabled backend.
+// Login (before MFA) always succeeds with policy "foo".
+// An MFA "test" type is added to mfa.handlers that succeeds
+// if MFA method is "accept", otherwise it rejects.
+func MakeTestBackend() *framework.Backend {
+ handlers["test"] = testMFAHandler
+ b := &framework.Backend{
+ Help: "",
+
+ PathsSpecial: &logical.Paths{
+ Root: MFARootPaths(),
+ Unauthenticated: []string{
+ "login",
+ },
+ },
+ Paths: MFAPaths(nil, testPathLogin()),
+ }
+ return b
+}
+
+func testPathLogin() *framework.Path {
+ return &framework.Path{
+ Pattern: `login`,
+ Fields: map[string]*framework.FieldSchema{
+ "username": &framework.FieldSchema{
+ Type: framework.TypeString,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: testPathLoginHandler,
+ },
+ }
+}
+
+func testPathLoginHandler(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ username := d.Get("username").(string)
+
+ return &logical.Response{
+ Auth: &logical.Auth{
+ Policies: []string{"foo"},
+ Metadata: map[string]string{
+ "username": username,
+ },
+ },
+ }, nil
+}
+
+func testMFAHandler(req *logical.Request, d *framework.FieldData, resp *logical.Response) (
+ *logical.Response, error) {
+ if d.Get("method").(string) != "accept" {
+ return logical.ErrorResponse("Deny access"), nil
+ } else {
+ return resp, nil
+ }
+}
+
+func TestMFALogin(t *testing.T) {
+ b := MakeTestBackend()
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepEnableMFA(t),
+ testAccStepLogin(t, "user"),
+ },
+ })
+}
+
+func TestMFALoginDenied(t *testing.T) {
+ b := MakeTestBackend()
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ AcceptanceTest: true,
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepEnableMFA(t),
+ testAccStepLoginDenied(t, "user"),
+ },
+ })
+}
+
+func testAccStepEnableMFA(t *testing.T) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "mfa_config",
+ Data: map[string]interface{}{
+ "type": "test",
+ },
+ }
+}
+
+func testAccStepLogin(t *testing.T, username string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Data: map[string]interface{}{
+ "method": "accept",
+ "username": username,
+ },
+ Unauthenticated: true,
+ Check: logicaltest.TestCheckAuth([]string{"foo"}),
+ }
+}
+
+func testAccStepLoginDenied(t *testing.T, username string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "login",
+ Data: map[string]interface{}{
+ "method": "deny",
+ "username": username,
+ },
+ Unauthenticated: true,
+ Check: logicaltest.TestCheckError(),
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/path_mfa_config.go b/vendor/github.com/hashicorp/vault/helper/mfa/path_mfa_config.go
new file mode 100644
index 0000000..8f96e25
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/mfa/path_mfa_config.go
@@ -0,0 +1,88 @@
+package mfa
+
+import (
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathMFAConfig(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `mfa_config`,
+ Fields: map[string]*framework.FieldSchema{
+ "type": &framework.FieldSchema{
+ Type: framework.TypeString,
+				Description: "Enables MFA with the given backend (available: duo)",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathMFAConfigWrite,
+ logical.ReadOperation: b.pathMFAConfigRead,
+ },
+
+ HelpSynopsis: pathMFAConfigHelpSyn,
+ HelpDescription: pathMFAConfigHelpDesc,
+ }
+}
+
+func (b *backend) MFAConfig(req *logical.Request) (*MFAConfig, error) {
+ entry, err := req.Storage.Get("mfa_config")
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+ var result MFAConfig
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+func (b *backend) pathMFAConfigWrite(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entry, err := logical.StorageEntryJSON("mfa_config", MFAConfig{
+ Type: d.Get("type").(string),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathMFAConfigRead(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+
+ config, err := b.MFAConfig(req)
+ if err != nil {
+ return nil, err
+ }
+ if config == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "type": config.Type,
+ },
+ }, nil
+}
+
+type MFAConfig struct {
+ Type string `json:"type"`
+}
+
+const pathMFAConfigHelpSyn = `
+Configure the multi-factor authentication backend.
+`
+
+const pathMFAConfigHelpDesc = `
+This endpoint allows you to turn on multi-factor authentication with a given backend.
+Currently only Duo is supported.
+`
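
To round off the MFA configuration surface, a hedged sketch of enabling Duo via this endpoint, again assuming a hypothetical auth/userpass mount:

    package main

    import (
    	"log"

    	"github.com/hashicorp/vault/api"
    )

    func main() {
    	client, err := api.NewClient(api.DefaultConfig())
    	if err != nil {
    		log.Fatal(err)
    	}
    	// "duo" is currently the only supported type, per the help text.
    	_, err = client.Logical().Write("auth/userpass/mfa_config", map[string]interface{}{
    		"type": "duo",
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
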
diff --git a/vendor/github.com/hashicorp/vault/helper/mlock/mlock.go b/vendor/github.com/hashicorp/vault/helper/mlock/mlock.go
new file mode 100644
index 0000000..1675633
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/mlock/mlock.go
@@ -0,0 +1,15 @@
+package mlock
+
+// This should be set by the OS-specific packages to tell whether LockMemory
+// is supported or not.
+var supported bool
+
+// Supported returns true if LockMemory is functional on this system.
+func Supported() bool {
+ return supported
+}
+
+// LockMemory prevents any memory from being swapped to disk.
+func LockMemory() error {
+ return lockMemory()
+}
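
A minimal usage sketch for this API; note that on unsupported platforms (see mlock_unavail.go below) LockMemory is a silent no-op, so callers who need a hard guarantee should consult Supported first:

    package main

    import (
    	"log"

    	"github.com/hashicorp/vault/helper/mlock"
    )

    func main() {
    	// On the platforms listed in mlock_unavail.go, Supported reports
    	// false and LockMemory does nothing; elsewhere it wraps mlockall(2).
    	if !mlock.Supported() {
    		log.Println("memory locking not supported on this platform")
    	}
    	if err := mlock.LockMemory(); err != nil {
    		log.Fatalf("mlockall failed: %v", err)
    	}
    	log.Println("current and future pages locked in RAM")
    }
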
diff --git a/vendor/github.com/hashicorp/vault/helper/mlock/mlock_unavail.go b/vendor/github.com/hashicorp/vault/helper/mlock/mlock_unavail.go
new file mode 100644
index 0000000..8084963
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/mlock/mlock_unavail.go
@@ -0,0 +1,13 @@
+// +build android darwin nacl netbsd plan9 windows
+
+package mlock
+
+func init() {
+ supported = false
+}
+
+func lockMemory() error {
+ // XXX: No good way to do this on Windows. There is the VirtualLock
+ // method, but it requires a specific address and offset.
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/mlock/mlock_unix.go b/vendor/github.com/hashicorp/vault/helper/mlock/mlock_unix.go
new file mode 100644
index 0000000..af0a69d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/mlock/mlock_unix.go
@@ -0,0 +1,18 @@
+// +build dragonfly freebsd linux openbsd solaris
+
+package mlock
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+func init() {
+ supported = true
+}
+
+func lockMemory() error {
+ // Mlockall prevents all current and future pages from being swapped out.
+ return unix.Mlockall(syscall.MCL_CURRENT | syscall.MCL_FUTURE)
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
new file mode 100644
index 0000000..9ba2bf7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
@@ -0,0 +1,62 @@
+package parseutil
+
+import (
+ "encoding/json"
+ "errors"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+func ParseDurationSecond(in interface{}) (time.Duration, error) {
+ var dur time.Duration
+ jsonIn, ok := in.(json.Number)
+ if ok {
+ in = jsonIn.String()
+ }
+ switch in.(type) {
+ case string:
+ inp := in.(string)
+ var err error
+		// Look for a suffix; otherwise it's a plain second value
+ if strings.HasSuffix(inp, "s") || strings.HasSuffix(inp, "m") || strings.HasSuffix(inp, "h") {
+ dur, err = time.ParseDuration(inp)
+ if err != nil {
+ return dur, err
+ }
+ } else {
+ // Plain integer
+ secs, err := strconv.ParseInt(inp, 10, 64)
+ if err != nil {
+ return dur, err
+ }
+ dur = time.Duration(secs) * time.Second
+ }
+ case int:
+ dur = time.Duration(in.(int)) * time.Second
+ case int32:
+ dur = time.Duration(in.(int32)) * time.Second
+ case int64:
+ dur = time.Duration(in.(int64)) * time.Second
+ case uint:
+ dur = time.Duration(in.(uint)) * time.Second
+ case uint32:
+ dur = time.Duration(in.(uint32)) * time.Second
+ case uint64:
+ dur = time.Duration(in.(uint64)) * time.Second
+ default:
+ return 0, errors.New("could not parse duration from input")
+ }
+
+ return dur, nil
+}
+
+func ParseBool(in interface{}) (bool, error) {
+ var result bool
+ if err := mapstructure.WeakDecode(in, &result); err != nil {
+ return false, err
+ }
+ return result, nil
+}
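
A short sketch of the input shapes ParseDurationSecond accepts: strings with an s/m/h suffix go through time.ParseDuration, while bare numeric strings, integer types, and json.Number values are taken as whole seconds:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"

    	"github.com/hashicorp/vault/helper/parseutil"
    )

    func main() {
    	// "90" is a plain second count, "90m" carries a unit suffix, and
    	// json.Number values are converted to their string form first.
    	for _, in := range []interface{}{"90", "90m", 3600, json.Number("45")} {
    		d, err := parseutil.ParseDurationSecond(in)
    		if err != nil {
    			log.Fatal(err)
    		}
    		fmt.Printf("%v -> %s\n", in, d)
    	}
    }
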
diff --git a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil_test.go b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil_test.go
new file mode 100644
index 0000000..7168a45
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil_test.go
@@ -0,0 +1,55 @@
+package parseutil
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+)
+
+func Test_ParseDurationSecond(t *testing.T) {
+ outp, err := ParseDurationSecond("9876s")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if outp != time.Duration(9876)*time.Second {
+ t.Fatal("not equivalent")
+ }
+ outp, err = ParseDurationSecond("9876")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if outp != time.Duration(9876)*time.Second {
+ t.Fatal("not equivalent")
+ }
+ outp, err = ParseDurationSecond(json.Number("4352"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if outp != time.Duration(4352)*time.Second {
+ t.Fatal("not equivalent")
+ }
+}
+
+func Test_ParseBool(t *testing.T) {
+ outp, err := ParseBool("true")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !outp {
+ t.Fatal("wrong output")
+ }
+ outp, err = ParseBool(1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !outp {
+ t.Fatal("wrong output")
+ }
+ outp, err = ParseBool(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !outp {
+ t.Fatal("wrong output")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/password/password.go b/vendor/github.com/hashicorp/vault/helper/password/password.go
new file mode 100644
index 0000000..102fbe8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/password/password.go
@@ -0,0 +1,64 @@
+// Package password provides functions for reading a password securely from a terminal.
+// The code in this package disables echo in the terminal so that the
+// password is not echoed back in plaintext to the user.
+package password
+
+import (
+ "errors"
+ "io"
+ "os"
+ "os/signal"
+)
+
+var ErrInterrupted = errors.New("interrupted")
+
+// Read reads the password from the given os.File. The password
+// will not be echoed back to the user. Ctrl-C will automatically return
+// from this function with a blank string and an ErrInterrupted.
+func Read(f *os.File) (string, error) {
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, os.Interrupt)
+ defer signal.Stop(ch)
+
+ // Run the actual read in a go-routine so that we can still detect signals
+ var result string
+ var resultErr error
+ doneCh := make(chan struct{})
+ go func() {
+ defer close(doneCh)
+ result, resultErr = read(f)
+ }()
+
+ // Wait on either the read to finish or the signal to come through
+ select {
+ case <-ch:
+ return "", ErrInterrupted
+ case <-doneCh:
+ return result, resultErr
+ }
+}
+
+func readline(f *os.File) (string, error) {
+ var buf [1]byte
+ resultBuf := make([]byte, 0, 64)
+ for {
+ n, err := f.Read(buf[:])
+ if err != nil && err != io.EOF {
+ return "", err
+ }
+ if n == 0 || buf[0] == '\n' || buf[0] == '\r' {
+ break
+ }
+
+ // ASCII code 3 is what is sent for a Ctrl-C while reading raw.
+ // If we see that, then get the interrupt. We have to do this here
+ // because terminals in raw mode won't catch it at the shell level.
+ if buf[0] == 3 {
+ return "", ErrInterrupted
+ }
+
+ resultBuf = append(resultBuf, buf[0])
+ }
+
+ return string(resultBuf), nil
+}
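
A usage sketch for Read; prompting on stderr keeps stdout clean, and a Ctrl-C during the read comes back as ErrInterrupted:

    package main

    import (
    	"fmt"
    	"log"
    	"os"

    	"github.com/hashicorp/vault/helper/password"
    )

    func main() {
    	fmt.Fprint(os.Stderr, "Password: ")
    	pw, err := password.Read(os.Stdin)
    	fmt.Fprintln(os.Stderr)
    	if err != nil {
    		// err is password.ErrInterrupted when Ctrl-C is pressed.
    		log.Fatal(err)
    	}
    	log.Printf("read %d characters (not echoed)", len(pw))
    }
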
diff --git a/vendor/github.com/hashicorp/vault/helper/password/password_solaris.go b/vendor/github.com/hashicorp/vault/helper/password/password_solaris.go
new file mode 100644
index 0000000..43ad722
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/password/password_solaris.go
@@ -0,0 +1,55 @@
+// +build solaris
+
+package password
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+func read(f *os.File) (string, error) {
+ fd := int(f.Fd())
+ if !isTerminal(fd) {
+ return "", fmt.Errorf("File descriptor %d is not a terminal", fd)
+ }
+
+ oldState, err := makeRaw(fd)
+ if err != nil {
+ return "", err
+ }
+ defer unix.IoctlSetTermios(fd, unix.TCSETS, oldState)
+
+ return readline(f)
+}
+
+// isTerminal returns true if there is a terminal attached to the given
+// file descriptor.
+// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func isTerminal(fd int) bool {
+ var termio unix.Termio
+ err := unix.IoctlSetTermio(fd, unix.TCGETA, &termio)
+ return err == nil
+}
+
+// makeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c
+func makeRaw(fd int) (*unix.Termios, error) {
+ oldTermiosPtr, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
+ if err != nil {
+ return nil, err
+ }
+ oldTermios := *oldTermiosPtr
+
+ newTermios := oldTermios
+ newTermios.Lflag &^= syscall.ECHO | syscall.ECHOE | syscall.ECHOK | syscall.ECHONL
+ if err := unix.IoctlSetTermios(fd, unix.TCSETS, &newTermios); err != nil {
+ return nil, err
+ }
+
+ return oldTermiosPtr, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/password/password_unix.go b/vendor/github.com/hashicorp/vault/helper/password/password_unix.go
new file mode 100644
index 0000000..5ce7501
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/password/password_unix.go
@@ -0,0 +1,25 @@
+// +build linux darwin freebsd netbsd openbsd
+
+package password
+
+import (
+ "fmt"
+ "os"
+
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+func read(f *os.File) (string, error) {
+ fd := int(f.Fd())
+ if !terminal.IsTerminal(fd) {
+ return "", fmt.Errorf("File descriptor %d is not a terminal", fd)
+ }
+
+ oldState, err := terminal.MakeRaw(fd)
+ if err != nil {
+ return "", err
+ }
+ defer terminal.Restore(fd, oldState)
+
+ return readline(f)
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/password/password_windows.go b/vendor/github.com/hashicorp/vault/helper/password/password_windows.go
new file mode 100644
index 0000000..1cd7dc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/password/password_windows.go
@@ -0,0 +1,48 @@
+// +build windows
+
+package password
+
+import (
+ "os"
+ "syscall"
+)
+
+var (
+ kernel32 = syscall.MustLoadDLL("kernel32.dll")
+ setConsoleModeProc = kernel32.MustFindProc("SetConsoleMode")
+)
+
+// Magic constant from MSDN that controls whether characters read are
+// echoed back on the console.
+//
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+const ENABLE_ECHO_INPUT = 0x0004
+
+func read(f *os.File) (string, error) {
+ handle := syscall.Handle(f.Fd())
+
+ // Grab the old console mode so we can reset it. We defer the reset
+ // right away because it doesn't matter (it is idempotent).
+ var oldMode uint32
+ if err := syscall.GetConsoleMode(handle, &oldMode); err != nil {
+ return "", err
+ }
+ defer setConsoleMode(handle, oldMode)
+
+ // The new mode is the old mode WITHOUT the echo input flag set.
+	newMode := oldMode &^ ENABLE_ECHO_INPUT
+ if err := setConsoleMode(handle, newMode); err != nil {
+ return "", err
+ }
+
+ return readline(f)
+}
+
+func setConsoleMode(console syscall.Handle, mode uint32) error {
+ r, _, err := setConsoleModeProc.Call(uintptr(console), uintptr(mode))
+ if r == 0 {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go
new file mode 100644
index 0000000..d8b7f60
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go
@@ -0,0 +1,117 @@
+package pgpkeys
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+
+ "github.com/keybase/go-crypto/openpgp"
+ "github.com/keybase/go-crypto/openpgp/packet"
+)
+
+// EncryptShares takes an ordered set of byte slices to encrypt and the
+// corresponding base64-encoded public keys to encrypt them with, and encrypts
+// each byte slice with the corresponding public key.
+//
+// Note: There is no corresponding test function; this functionality is
+// thoroughly tested in the init and rekey command unit tests
+func EncryptShares(input [][]byte, pgpKeys []string) ([]string, [][]byte, error) {
+ if len(input) != len(pgpKeys) {
+		return nil, nil, fmt.Errorf("Mismatch between number of items to encrypt and number of PGP keys")
+ }
+ encryptedShares := make([][]byte, 0, len(pgpKeys))
+ entities, err := GetEntities(pgpKeys)
+ if err != nil {
+ return nil, nil, err
+ }
+ for i, entity := range entities {
+ ctBuf := bytes.NewBuffer(nil)
+ pt, err := openpgp.Encrypt(ctBuf, []*openpgp.Entity{entity}, nil, nil, nil)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Error setting up encryption for PGP message: %s", err)
+ }
+ _, err = pt.Write(input[i])
+ if err != nil {
+ return nil, nil, fmt.Errorf("Error encrypting PGP message: %s", err)
+ }
+ pt.Close()
+ encryptedShares = append(encryptedShares, ctBuf.Bytes())
+ }
+
+ fingerprints, err := GetFingerprints(nil, entities)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return fingerprints, encryptedShares, nil
+}
+
+// GetFingerprints takes in a list of openpgp Entities and returns the
+// fingerprints. If entities is nil, it will instead parse both entities and
+// fingerprints from the pgpKeys string slice.
+func GetFingerprints(pgpKeys []string, entities []*openpgp.Entity) ([]string, error) {
+ if entities == nil {
+ var err error
+ entities, err = GetEntities(pgpKeys)
+
+ if err != nil {
+ return nil, err
+ }
+ }
+ ret := make([]string, 0, len(entities))
+ for _, entity := range entities {
+ ret = append(ret, fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))
+ }
+ return ret, nil
+}
+
+// GetEntities takes in a string array of base64-encoded PGP keys and returns
+// the openpgp Entities
+func GetEntities(pgpKeys []string) ([]*openpgp.Entity, error) {
+ ret := make([]*openpgp.Entity, 0, len(pgpKeys))
+ for _, keystring := range pgpKeys {
+ data, err := base64.StdEncoding.DecodeString(keystring)
+ if err != nil {
+ return nil, fmt.Errorf("Error decoding given PGP key: %s", err)
+ }
+ entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data)))
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing given PGP key: %s", err)
+ }
+ ret = append(ret, entity)
+ }
+ return ret, nil
+}
+
+// DecryptBytes takes in base64-encoded encrypted bytes and the base64-encoded
+// private key and decrypts it. A bytes.Buffer is returned to allow the caller
+// to do useful things with it (get it as a []byte, get it as a string, use it
+// as an io.Reader, etc), and also because this function doesn't know if what
+// comes out is binary data or a string, so let the caller decide.
+func DecryptBytes(encodedCrypt, privKey string) (*bytes.Buffer, error) {
+ privKeyBytes, err := base64.StdEncoding.DecodeString(privKey)
+ if err != nil {
+ return nil, fmt.Errorf("Error decoding base64 private key: %s", err)
+ }
+
+ cryptBytes, err := base64.StdEncoding.DecodeString(encodedCrypt)
+ if err != nil {
+ return nil, fmt.Errorf("Error decoding base64 crypted bytes: %s", err)
+ }
+
+ entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privKeyBytes)))
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing private key: %s", err)
+ }
+
+ entityList := &openpgp.EntityList{entity}
+ md, err := openpgp.ReadMessage(bytes.NewBuffer(cryptBytes), entityList, nil, nil)
+ if err != nil {
+ return nil, fmt.Errorf("Error decrypting the messages: %s", err)
+ }
+
+ ptBuf := bytes.NewBuffer(nil)
+ ptBuf.ReadFrom(md.UnverifiedBody)
+
+ return ptBuf, nil
+}
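
A sketch of the roundtrip across EncryptShares and DecryptBytes. Note the asymmetry: EncryptShares returns raw ciphertext bytes, while DecryptBytes expects its input base64-encoded. The key material below is a placeholder; any matching base64-encoded PGP key pair (such as the test keys in test_keys.go) would do:

    package main

    import (
    	"encoding/base64"
    	"fmt"
    	"log"

    	"github.com/hashicorp/vault/helper/pgpkeys"
    )

    // roundtrip encrypts a secret for one recipient and decrypts it again.
    // pubB64 and privB64 must be a matching base64-encoded PGP key pair.
    func roundtrip(pubB64, privB64 string, secret []byte) ([]byte, error) {
    	fingerprints, ciphertexts, err := pgpkeys.EncryptShares([][]byte{secret}, []string{pubB64})
    	if err != nil {
    		return nil, err
    	}
    	fmt.Println("encrypted for fingerprint", fingerprints[0])

    	// DecryptBytes wants the ciphertext base64-encoded.
    	buf, err := pgpkeys.DecryptBytes(base64.StdEncoding.EncodeToString(ciphertexts[0]), privB64)
    	if err != nil {
    		return nil, err
    	}
    	return buf.Bytes(), nil
    }

    func main() {
    	// Placeholder keys: substitute a real base64-encoded key pair.
    	pt, err := roundtrip("<public key b64>", "<private key b64>", []byte("shamir share"))
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("recovered %q\n", pt)
    }
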
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go
new file mode 100644
index 0000000..ccfc64b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go
@@ -0,0 +1,97 @@
+package pgpkeys
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/keybase/go-crypto/openpgp"
+)
+
+// PubKeyFilesFlag implements the flag.Value interface and allows
+// parsing and reading a list of PGP public key files
+type PubKeyFilesFlag []string
+
+func (p *PubKeyFilesFlag) String() string {
+ return fmt.Sprint(*p)
+}
+
+func (p *PubKeyFilesFlag) Set(value string) error {
+ if len(*p) > 0 {
+ return errors.New("pgp-keys can only be specified once")
+ }
+
+ splitValues := strings.Split(value, ",")
+
+ keybaseMap, err := FetchKeybasePubkeys(splitValues)
+ if err != nil {
+ return err
+ }
+
+ // Now go through the actual flag, and substitute in resolved keybase
+ // entries where appropriate
+ for _, keyfile := range splitValues {
+ if strings.HasPrefix(keyfile, kbPrefix) {
+ key := keybaseMap[keyfile]
+ if key == "" {
+ return fmt.Errorf("key for keybase user %s was not found in the map", strings.TrimPrefix(keyfile, kbPrefix))
+ }
+ *p = append(*p, key)
+ continue
+ }
+
+ pgpStr, err := ReadPGPFile(keyfile)
+ if err != nil {
+ return err
+ }
+
+ *p = append(*p, pgpStr)
+ }
+ return nil
+}
+
+func ReadPGPFile(path string) (string, error) {
+ if path[0] == '@' {
+ path = path[1:]
+ }
+ f, err := os.Open(path)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ buf := bytes.NewBuffer(nil)
+ _, err = buf.ReadFrom(f)
+ if err != nil {
+ return "", err
+ }
+
+	// First parse as an armored keyring file; if that doesn't work, treat it as a straight binary/b64 string
+ keyReader := bytes.NewReader(buf.Bytes())
+ entityList, err := openpgp.ReadArmoredKeyRing(keyReader)
+ if err == nil {
+ if len(entityList) != 1 {
+ return "", fmt.Errorf("more than one key found in file %s", path)
+ }
+ if entityList[0] == nil {
+ return "", fmt.Errorf("primary key was nil for file %s", path)
+ }
+
+ serializedEntity := bytes.NewBuffer(nil)
+ err = entityList[0].Serialize(serializedEntity)
+ if err != nil {
+ return "", fmt.Errorf("error serializing entity for file %s: %s", path, err)
+ }
+
+ return base64.StdEncoding.EncodeToString(serializedEntity.Bytes()), nil
+ }
+
+ _, err = base64.StdEncoding.DecodeString(buf.String())
+ if err == nil {
+ return buf.String(), nil
+ }
+ return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
+}
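
Since PubKeyFilesFlag satisfies flag.Value, it plugs directly into the standard flag package; a sketch with a hypothetical -pgp-keys flag:

    package main

    import (
    	"flag"
    	"fmt"

    	"github.com/hashicorp/vault/helper/pgpkeys"
    )

    func main() {
    	// Accepts a single comma-separated list mixing key file paths
    	// (optionally @-prefixed) and keybase:<user> entries.
    	var keys pgpkeys.PubKeyFilesFlag
    	flag.Var(&keys, "pgp-keys", "comma-separated PGP key files or keybase:<user> entries")
    	flag.Parse()
    	fmt.Printf("loaded %d base64-encoded key(s)\n", len(keys))
    }
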
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag_test.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag_test.go
new file mode 100644
index 0000000..6fa6718
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag_test.go
@@ -0,0 +1,237 @@
+package pgpkeys
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/hex"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/keybase/go-crypto/openpgp"
+ "github.com/keybase/go-crypto/openpgp/packet"
+)
+
+func TestPubKeyFilesFlag_implements(t *testing.T) {
+ var raw interface{}
+ raw = new(PubKeyFilesFlag)
+ if _, ok := raw.(flag.Value); !ok {
+		t.Fatalf("PubKeyFilesFlag should be a flag.Value")
+ }
+}
+
+func TestPubKeyFilesFlagSetBinary(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "vault-test")
+ if err != nil {
+ t.Fatalf("Error creating temporary directory: %s", err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ decoder := base64.StdEncoding
+ pub1Bytes, err := decoder.DecodeString(pubKey1)
+ if err != nil {
+ t.Fatalf("Error decoding bytes for public key 1: %s", err)
+ }
+ err = ioutil.WriteFile(tempDir+"/pubkey1", pub1Bytes, 0755)
+ if err != nil {
+ t.Fatalf("Error writing pub key 1 to temp file: %s", err)
+ }
+ pub2Bytes, err := decoder.DecodeString(pubKey2)
+ if err != nil {
+ t.Fatalf("Error decoding bytes for public key 2: %s", err)
+ }
+ err = ioutil.WriteFile(tempDir+"/pubkey2", pub2Bytes, 0755)
+ if err != nil {
+ t.Fatalf("Error writing pub key 2 to temp file: %s", err)
+ }
+ pub3Bytes, err := decoder.DecodeString(pubKey3)
+ if err != nil {
+ t.Fatalf("Error decoding bytes for public key 3: %s", err)
+ }
+ err = ioutil.WriteFile(tempDir+"/pubkey3", pub3Bytes, 0755)
+ if err != nil {
+ t.Fatalf("Error writing pub key 3 to temp file: %s", err)
+ }
+
+ pkf := new(PubKeyFilesFlag)
+ err = pkf.Set(tempDir + "/pubkey1,@" + tempDir + "/pubkey2")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = pkf.Set(tempDir + "/pubkey3")
+ if err == nil {
+ t.Fatalf("err: should not have been able to set a second value")
+ }
+
+ expected := []string{strings.Replace(pubKey1, "\n", "", -1), strings.Replace(pubKey2, "\n", "", -1)}
+ if !reflect.DeepEqual(pkf.String(), fmt.Sprint(expected)) {
+ t.Fatalf("Bad: %#v", pkf)
+ }
+}
+
+func TestPubKeyFilesFlagSetB64(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "vault-test")
+ if err != nil {
+ t.Fatalf("Error creating temporary directory: %s", err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ err = ioutil.WriteFile(tempDir+"/pubkey1", []byte(pubKey1), 0755)
+ if err != nil {
+ t.Fatalf("Error writing pub key 1 to temp file: %s", err)
+ }
+ err = ioutil.WriteFile(tempDir+"/pubkey2", []byte(pubKey2), 0755)
+ if err != nil {
+ t.Fatalf("Error writing pub key 2 to temp file: %s", err)
+ }
+ err = ioutil.WriteFile(tempDir+"/pubkey3", []byte(pubKey3), 0755)
+ if err != nil {
+ t.Fatalf("Error writing pub key 3 to temp file: %s", err)
+ }
+
+ pkf := new(PubKeyFilesFlag)
+ err = pkf.Set(tempDir + "/pubkey1,@" + tempDir + "/pubkey2")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = pkf.Set(tempDir + "/pubkey3")
+ if err == nil {
+ t.Fatalf("err: should not have been able to set a second value")
+ }
+
+ expected := []string{pubKey1, pubKey2}
+ if !reflect.DeepEqual(pkf.String(), fmt.Sprint(expected)) {
+ t.Fatalf("bad: got %s, expected %s", pkf.String(), fmt.Sprint(expected))
+ }
+}
+
+func TestPubKeyFilesFlagSetKeybase(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "vault-test")
+ if err != nil {
+ t.Fatalf("Error creating temporary directory: %s", err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ err = ioutil.WriteFile(tempDir+"/pubkey2", []byte(pubKey2), 0755)
+ if err != nil {
+ t.Fatalf("Error writing pub key 2 to temp file: %s", err)
+ }
+
+ pkf := new(PubKeyFilesFlag)
+ err = pkf.Set("keybase:jefferai,@" + tempDir + "/pubkey2" + ",keybase:hashicorp")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ fingerprints := []string{}
+ for _, pubkey := range []string(*pkf) {
+ keyBytes, err := base64.StdEncoding.DecodeString(pubkey)
+ if err != nil {
+ t.Fatalf("bad: %v", err)
+ }
+ pubKeyBuf := bytes.NewBuffer(keyBytes)
+ reader := packet.NewReader(pubKeyBuf)
+ entity, err := openpgp.ReadEntity(reader)
+ if err != nil {
+ t.Fatalf("bad: %v", err)
+ }
+ if entity == nil {
+ t.Fatalf("nil entity encountered")
+ }
+ fingerprints = append(fingerprints, hex.EncodeToString(entity.PrimaryKey.Fingerprint[:]))
+ }
+
+ exp := []string{
+ "0f801f518ec853daff611e836528efcac6caa3db",
+ "cf3d4694c9f57b28cb4092c2eb832c67eb5e8957",
+ "91a6e7f85d05c65630bef18951852d87348ffc4c",
+ }
+
+ if !reflect.DeepEqual(fingerprints, exp) {
+ t.Fatalf("bad: got \n%#v\nexpected\n%#v\n", fingerprints, exp)
+ }
+}
+
+const pubKey1 = `mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da
+rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/
+063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f
+sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg
+8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3Qg
+S2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOfLr44B
+HbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRTJfjECi+AuTGeDwBy84TD
+cRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3CEe8cMwIPqPT2kajJVdOyrvkyuFOdPFOE
+A7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlB
+C0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXa
+QKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7oEDCn
+aY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I1Ktm698UAZS9Jt8y
+jak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb
+6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5N
+ZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu
+9p315E87DOleYwxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ
+AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYu
+lEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHN
+C1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0
+YwKoz3h9+QEcZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJi
+oPn2jVMnXCm4EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH
+/AtY+XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcI
+PXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O
+9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx
+8iDV+dNtDVKfPRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKd
+OIu60YPNE4+h7u2CfYyFPu3AlUaGNMBlvy6PEpU=`
+const pubKey2 = `mQENBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG
+Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4
+0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e
+Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk
+Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAG0EFZhdWx0IFRlc3Qg
+S2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOuDLGfr
+XolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHipZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO
+2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABqhb5ojexdnAYRswaHV201ZCclj9rnJN1P
+Ag0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmGkdrg8K8ARmRILjmwuBAgJM0eXBZHNGWX
+elk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0vDttB+ZXqF88W9jAYlvdgbTtajNF5IDY
+DjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlxk4e5AQ0EVduQkQEIAOjZV5tbpfIh5Qef
+pIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg
++YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/tGF5xE3e5CoZRsHV/c92h3t1LdJNOnC5m
+UKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBHyt0tdHtIWuQv6joTJzujqViRhlCwQYzQ
+SKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1rENO8JOuPu6tMS+znFu67skq2gFFZwCQW
+IjdHm+2ukE+PE580WAWudyMAEQEAAYkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZ
+AQIABgUCVduQkQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDD
+hnV3bXQsCvn/6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQ
+e3l4CqJvkn6jybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4K
+BIrp/bhG6PdnigKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eY
+ENtyOmEMWOFCLLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H
+/1trYUtJjXQKHmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7
+PkUZTfpaP/L6DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0U
+PEnjvtZTp5yOhTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQd
+w/2epIewH0L/FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4
+MFOMVRn1dc3qdXlg3mimA+iK7tABQfG0RJ9YzWs=`
+const pubKey3 = `mQENBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj
+6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4
+Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH
+CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy
+resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAG0EFZhdWx0IFRlc3Qg
+S2V5IDOJATgEEwECACIFAlXbkiMCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEPR5S1b8
+LcbdWjEH/2mhqC9a0Vk1IzOgxEoVxYVqVdvaxI0nTZOTfmcFYn4HQlQ+SLEoyNWe5jtkhx4k5uHi
+pxwKHzOv02YM14NWC6bvKw2CQETLDPG4Cv8YMUmpho5tnMDdttIzp8HjyJRtHazU1uTes2/yuqh6
+LHCejVJI0uST3RibquwdG3QjPP8Umxu+YC9+FOW2Kit/AQ8JluFDJdq3/wSX8VfYZrGdgmreE7KY
+MolhCkzGSPj7oFygw8LqKoJvt9tCuBKhZMBuMv1sB5CoJIWdPoqOZc4U7L1XdqfKvFZR/RhuXgN1
+lkI9MqrnLDpikL3Lk+ctLxWOjUCW8roqKoHZYBF7XPqdAfm5AQ0EVduSIwEIAOPcjd4QgbLlqIk3
+s6BPRRyVzglTgUdf+I0rUDybaDJfJobZd8U6e4hkPvRoQ8tJefnz/qnD/63watAbJYcVTme40I3V
+KDOmVGcyaDxiKP1disKqcEJd7XQiI72oAiXmEH0y+5UwnOMks/lwaAGDMGVRjHEXI6fiRPFsfTr8
+7qvMJ3pW1OiOXVSezuBNTlmyJC7srQ1/nwxL337ev6D1zQZd3JuhcxLkHrUELLNwzhvcZ70vg645
+jAmz8EdmvvoqEPPoHqKgP5AeHACOsTm953KHhgx3NYuGPU/RoIvugKt4Iq5nw7TWFTjPHGVF3GTQ
+ry5CZ/AzXiL57hVEhDvmuT8AEQEAAYkCPgQYAQIACQUCVduSIwIbLgEpCRD0eUtW/C3G3cBdIAQZ
+AQIABgUCVduSIwAKCRAFI/9Nx3K5IPOFCACsZ/Z4s2LcEoA51TW+T5w+YevlIuq+332JtqNIpuGI
+WpGxUxyDyPT0YQWr0SObBORYNr7RP8d/I2rbaFKyaDaKvRofYr+TwXy92phBo7pdEUamBpfrm/sr
++2BgAB2x3HWXp+IMdeVVhqQe8t4cnFm3c1fIdxADyiJuV5ge2Ml5gK5yNwqCQPh7U2RqC+lmVlMJ
+GvWInIRn2mf6A7phDYNZfOz6dkar4yyh5r9rRgrZw88r/yIlrq/c6KRUIgnPMrFYEauggOceZ827
++jKkbKWFEuHtiCxW7kRAN25UfnGsPaF+NSGM2q1vCG4HiFydx6lMoXM0Shf8+ZwyrV/5BzAqpWwI
+AJ37tEwC58Fboynly6OGOzgPS0xKnzkXMOtquTo0qEH/1bEUsBknn795BmZOTf4oBC5blN6qRv7c
+GSP00i+sxql1NhTjJcjJtfOPUzgfW+Af/+HR648z4c7c6MCjDFKnk8ZkoGLRU7ISjenkNFzvu2bj
+lxJkil0uJDlLPbbX80ojzV1GS9g+ZxVPR+68N1QLl2FU6zsfg34upmLLHG8VG4vExzgyNkOwfTYv
+dgyRNTjnuPue6H12fZZ9uCNeG52v7lR3eoQcCxBOniwgipB8UJ52RWXblwxzCtGtDi/EWB3zLTUn
+puKcgucA0LotbihSMxhDylaARfVO1QV6csabM/g=`
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go
new file mode 100644
index 0000000..5c14cbc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go
@@ -0,0 +1,116 @@
+package pgpkeys
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/keybase/go-crypto/openpgp"
+)
+
+const (
+ kbPrefix = "keybase:"
+)
+
+// FetchKeybasePubkeys fetches public keys from Keybase given a set of
+// usernames, which are derived from correctly formatted input entries. It
+// doesn't use the Keybase client code, both because of its API and because it
+// is considered alpha and probably best not to rely on. The keys are returned
+// as base64-encoded strings.
+func FetchKeybasePubkeys(input []string) (map[string]string, error) {
+ client := cleanhttp.DefaultClient()
+ if client == nil {
+ return nil, fmt.Errorf("unable to create an http client")
+ }
+
+ if len(input) == 0 {
+ return nil, nil
+ }
+
+ usernames := make([]string, 0, len(input))
+ for _, v := range input {
+ if strings.HasPrefix(v, kbPrefix) {
+ usernames = append(usernames, strings.TrimPrefix(v, kbPrefix))
+ }
+ }
+
+ if len(usernames) == 0 {
+ return nil, nil
+ }
+
+ ret := make(map[string]string, len(usernames))
+ url := fmt.Sprintf("https://keybase.io/_/api/1.0/user/lookup.json?usernames=%s&fields=public_keys", strings.Join(usernames, ","))
+ resp, err := client.Get(url)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ type publicKeys struct {
+ Primary struct {
+ Bundle string
+ }
+ }
+
+ type them struct {
+ publicKeys `json:"public_keys"`
+ }
+
+ type kbResp struct {
+ Status struct {
+ Name string
+ }
+ Them []them
+ }
+
+ out := &kbResp{
+ Them: []them{},
+ }
+
+ if err := jsonutil.DecodeJSONFromReader(resp.Body, out); err != nil {
+ return nil, err
+ }
+
+ if out.Status.Name != "OK" {
+ return nil, fmt.Errorf("got non-OK response: %s", out.Status.Name)
+ }
+
+ missingNames := make([]string, 0, len(usernames))
+ var keyReader *bytes.Reader
+ serializedEntity := bytes.NewBuffer(nil)
+ for i, themVal := range out.Them {
+ if themVal.Primary.Bundle == "" {
+ missingNames = append(missingNames, usernames[i])
+ continue
+ }
+ keyReader = bytes.NewReader([]byte(themVal.Primary.Bundle))
+ entityList, err := openpgp.ReadArmoredKeyRing(keyReader)
+ if err != nil {
+ return nil, err
+ }
+ if len(entityList) != 1 {
+ return nil, fmt.Errorf("primary key could not be parsed for user %s", usernames[i])
+ }
+ if entityList[0] == nil {
+ return nil, fmt.Errorf("primary key was nil for user %s", usernames[i])
+ }
+
+ serializedEntity.Reset()
+ err = entityList[0].Serialize(serializedEntity)
+ if err != nil {
+ return nil, fmt.Errorf("error serializing entity for user %s: %s", usernames[i], err)
+ }
+
+ // The API returns values in the same ordering requested, so this should properly match
+ ret[kbPrefix+usernames[i]] = base64.StdEncoding.EncodeToString(serializedEntity.Bytes())
+ }
+
+ if len(missingNames) > 0 {
+ return nil, fmt.Errorf("unable to fetch keys for user(s) %s from keybase", strings.Join(missingNames, ","))
+ }
+
+ return ret, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase_test.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase_test.go
new file mode 100644
index 0000000..ded5af5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase_test.go
@@ -0,0 +1,42 @@
+package pgpkeys
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/hex"
+ "reflect"
+ "testing"
+
+ "github.com/keybase/go-crypto/openpgp"
+ "github.com/keybase/go-crypto/openpgp/packet"
+)
+
+func TestFetchKeybasePubkeys(t *testing.T) {
+ testset := []string{"keybase:jefferai", "keybase:hashicorp"}
+ ret, err := FetchKeybasePubkeys(testset)
+ if err != nil {
+ t.Fatalf("bad: %v", err)
+ }
+
+ fingerprints := []string{}
+ for _, user := range testset {
+ data, err := base64.StdEncoding.DecodeString(ret[user])
+ if err != nil {
+ t.Fatalf("error decoding key for user %s: %v", user, err)
+ }
+ entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data)))
+ if err != nil {
+ t.Fatalf("error parsing key for user %s: %v", user, err)
+ }
+ fingerprints = append(fingerprints, hex.EncodeToString(entity.PrimaryKey.Fingerprint[:]))
+ }
+
+ exp := []string{
+ "0f801f518ec853daff611e836528efcac6caa3db",
+ "91a6e7f85d05c65630bef18951852d87348ffc4c",
+ }
+
+ if !reflect.DeepEqual(fingerprints, exp) {
+ t.Fatalf("fingerprints do not match; expected \n%#v\ngot\n%#v\n", exp, fingerprints)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go
new file mode 100644
index 0000000..c10a905
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go
@@ -0,0 +1,271 @@
+package pgpkeys
+
+const (
+ TestPrivKey1 = `lQOYBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da
+rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/
+063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f
+sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg
+8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAEAB/wL+KX0mdeISEpX
+oDgt766Key1Kthe8nbEs5dOXIsP7OR7ZPcnE2hy6gftgVFnBGEZnWVN70vmJd6Z5y9d1mI+GecXj
+UL0EpI0EmohyYDJsHUnght/5ecRNFA+VeNmGPYNQGCeHJyZOiFunGGENpHU7BbubAht8delz37Mx
+JQgvMyR6AKvg8HKBoQeqV1uMWNJE/vKwV/z1dh1sjK/GFxu05Qaq0GTfAjVLuFOyJTS95yq6gblD
+jUdbHLp7tBeqIKo9voWCJF5mGOlq3973vVoWETy9b0YYPCE/M7fXmK9dJITHqkROLMW6TgcFeIw4
+yL5KOBCHk+QGPSvyQN7R7Fd5BADwuT1HZmvg7Y9GjarKXDjxdNemUiHtba2rUzfH6uNmKNQvwQek
+nma5palNUJ4/dz1aPB21FUBXJF5yWwXEdApl+lIDU0J5m4UD26rqEVRq9Kx3GsX+yfcwObkrSzW6
+kmnQSB5KI0fIuegMTM+Jxo3pB/mIRwDTMmk+vfzIGyW+7QQA8aFwFLMdKdfLgSGbl5Z6etmOAVQ2
+Oe2ebegU9z/ewi/Rdt2s9yQiAdGVM8+q15Saz8a+kyS/l1CjNPzr3VpYx1OdZ3gb7i2xoy9GdMYR
+ZpTq3TuST95kx/9DqA97JrP23G47U0vwF/cg8ixCYF8Fz5dG4DEsxgMwKqhGdW58wMMD/iytkfMk
+Vk6Z958Rpy7lhlC6L3zpO38767bSeZ8gRRi/NMFVOSGYepKFarnfxcTiNa+EoSVA6hUo1N64nALE
+sJBpyOoTfKIpz7WwTF1+WogkiYrfM6lHon1+3qlziAcRW0IohM3g2C1i3GWdON4Cl8/PDO3R0E52
+N6iG/ctNNeMiPe60EFZhdWx0IFRlc3QgS2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUI
+AgkKCwQWAgMBAh4BAheAAAoJEOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d
+4hIHsG7kmJRTJfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C
+Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF3
+9jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poe
+o+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeUR
+BRWdA5gEVduM9QEIAL53hJ5bZJ7oEDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkf
+Rqnv981fFwGnh2+I1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a
+9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu
+9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/z
+bfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOleYwxk+FoTqXEAEQEAAQAH+wVyQXaNwnjQ
+xfW+M8SJNo0C7e+0d7HsuBTA/d/eP4bj6+X8RaRFVwiMvSAoxsqBNCLJP00qzzKfRQWJseD1H35z
+UjM7rNVUEL2k1yppyp61S0qj0TdhVUfJDYZqRYonVgRMvzfDTB1ryKrefKenQYL/jGd9VYMnKmWZ
+6GVk4WWXXx61iOt2HNcmSXKetMM1Mg67woPZkA3fJaXZ+zW0zMu4lTSB7yl3+vLGIFYILkCFnREr
+drQ+pmIMwozUAt+pBq8dylnkHh6g/FtRfWmLIMDqM1NlyuHRp3dyLDFdTA93osLG0QJblfX54W34
+byX7a4HASelGi3nPjjOAsTFDkuEEANV2viaWk1CV4ryDrXGmy4Xo32Md+laGPRcVfbJ0mjZjhQsO
+gWC1tjMs1qZMPhcrKIBCjjdAcAIrGV9h3CXc0uGuez4XxLO+TPBKaS0B8rKhnKph1YZuf+HrOhzS
+astDnOjNIT+qucCL/qSbdYpj9of3yY61S59WphPOBjoVM3BFBADka6ZCk81gx8jA2E1e9UqQDmdM
+FZaVA1E7++kqVSFRDJGnq+5GrBTwCJ+sevi+Rvf8Nx4AXvpCdtMBPX9RogsUFcR0pMrKBrgRo/Vg
+EpuodY2Ef1VtqXR24OxtRf1UwvHKydIsU05rzMAy5uGgQvTzRTXxZFLGUY31wjWqmo9VPQP+PnwA
+K83EV2kk2bsXwZ9MXg05iXqGQYR4bEc/12v04BtaNaDS53hBDO4JIa3Bnz+5oUoYhb8FgezUKA9I
+n6RdKTTP1BLAu8titeozpNF07V++dPiSE2wrIVsaNHL1pUwW0ql50titVwe+EglWiCKPtJBcCPUA
+3oepSPchiDjPqrNCYIkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZAQIABgUCVduM
+9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYulEimOPzLUX/Z
+XZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr
+9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEc
+ZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4
+EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY+XsKVYRf
+NIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcIPXFv3m3WfUln
+G/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O9uK3lQozbw2g
+H9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKf
+PRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h
+7u2CfYyFPu3AlUaGNMBlvy6PEpU=`
+
+ TestPrivKey2 = `lQOYBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG
+Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4
+0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e
+Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk
+Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAEAB/oCBqTIsxlUgLtz
+HRpWW5MJ+93xvmVV0JHhRK/ygKghq+zpC6S+cn7dwrEj1JTPh+17lyemYQK+RMeiBEduoWNKuHUd
+WX353w2411rrc/VuGTglzhd8Ir2BdJlPesCzw4JQnrWqcBqN52W+iwhnE7PWVhnvItWnx6APK5Se
+q7dzFWy8Z8tNIHm0pBQbeyo6x2rHHSWkr2fs7V02qFQhii1ayFRMcgdOWSNX6CaZJuYhk/DyjApN
+9pVhi3P1pNMpFeV0Pt8Gl1f/9o6/HpAYYEt/6vtVRhFUGgtNi95oc0oyzIJxliRvd6+Z236osigQ
+QEBwj1ImRK8TKyWPlykiJWc5BADfldgOCA55o3Qz/z/oVE1mm+a3FmPPTQlHBXotNEsrWV2wmJHe
+lNQPI6ZwMtLrBSg8PUpG2Rvao6XJ4ZBl/VcDwfcLgCnALPCcL0L0Z3vH3Sc9Ta/bQWJODG7uSaI1
+iVJ7ArKNtVzTqRQWK967mol9CCqh4A0jRrH0aVEFbrqQ/QQA58iEJaFhzFZjufjC9N8Isn3Ky7xu
+h+dk001RNCb1GnNZcx4Ld2IB+uXyYjtg7dNaUhGgGuCBo9nax89bMsBzzUukx3SHq1pxopMg6Dm8
+ImBoIAicuQWgEkaP2T0rlwCozUalJZaG1gyrzkPhkeY7CglpJycHLHfY2MIb46c8+58D/iJ83Q5j
+Y4x+sqW2QeUYFwqCcOW8Urg64UxEkgXZXiNMwTAJCaxp/Pz7cgeUDwgv+6CXEdnT1910+byzK9ha
+V1Q/65+/JYuCeyHxcoAb4Wtpdl7GALGd/1G0UAmq47yrefEr/b00uS35i1qUUhOzo1NmEZch/bvF
+kmJ+WtAHunZcOCu0EFZhdWx0IFRlc3QgS2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUI
+AgkKCwQWAgMBAh4BAheAAAoJEOuDLGfrXolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHip
+ZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABq
+hb5ojexdnAYRswaHV201ZCclj9rnJN1PAg0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmG
+kdrg8K8ARmRILjmwuBAgJM0eXBZHNGWXelk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0
+vDttB+ZXqF88W9jAYlvdgbTtajNF5IDYDjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlx
+k4edA5gEVduQkQEIAOjZV5tbpfIh5QefpIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe
+4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg+YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/t
+GF5xE3e5CoZRsHV/c92h3t1LdJNOnC5mUKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBH
+yt0tdHtIWuQv6joTJzujqViRhlCwQYzQSKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1r
+ENO8JOuPu6tMS+znFu67skq2gFFZwCQWIjdHm+2ukE+PE580WAWudyMAEQEAAQAH/i7ndRPI+t0T
+AdEu0dTIdyrrg3g7gd471kQtIVZZwTYSy2yhNY/Ciu72s3ab8QNCxY8dNL5bRk8FKjHslAoNSFdO
+8iZSLiDgIHOZOcjYe6pqdgQaeTHodm1Otrn2SbB+K/3oX6W/y1xe18aSojGba/nHMj5PeJbIN9Pi
+jmh0WMLD/0rmkTTxR7qQ5+kMV4O29xY4qjdYRD5O0adeZX0mNncmlmQ+rX9yxrtSgFROu1jwVtfP
+hcNetifTTshJnTwND8hux5ECEadlIVBHypW28Hth9TRBXmddTmv7L7mdtUO6DybgkpWpw4k4LPsk
+uZ6aY4wcGRp7EVfWGr9NHbq/n+0EAOlhDXIGdylkQsndjBMyhPsXZa5fFBmOyHjXj733195Jgr1v
+ZjaIomrA9cvYrmN75oKrG1jJsMEl6HfC/ZPzEj6E51/p1PRdHP7CdUUA+DG8x4M3jn+e43psVuAR
+a1XbN+8/bOa0ubt7ljVPjAEvWRSvU9dRaQz93w3fduAuM07dBAD/ayK3e0d6JMJMrU50lNOXQBgL
+rFbg4rWzPO9BJQdhjOhmOZQiUa1Q+EV+s95yIUg1OAfaMP9KRIljr5RCdGNS6WoMNBAQOSrZpelf
+jW4NpzphNfWDGVkUoPoskVtJz/nu9d860dGd3Al0kSmtUpMu5QKlo+sSxXUPbWLUn8V9/wP/ScCW
+H+0gtL4R7SFazPeTIP+Cu5oR7A/DlFVLJKa3vo+atkhSvwxHGbg04vb/W4mKhGGVtMBtlhRmaWOe
+PhUulU5FdaYsdlpN/Yd+hhgU6NHlyImPGVEHWD8c6CG8qoZfpR33j2sqshs4i/MtJZeBvl62vxPn
+9bDN7KAjFNll9axAjIkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZAQIABgUCVduQ
+kQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDDhnV3bXQsCvn/
+6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQe3l4CqJvkn6j
+ybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4KBIrp/bhG6Pdn
+igKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eYENtyOmEMWOFC
+LLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H/1trYUtJjXQK
+HmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7PkUZTfpaP/L6
+DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0UPEnjvtZTp5yO
+hTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQdw/2epIewH0L/
+FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4MFOMVRn1dc3q
+dXlg3mimA+iK7tABQfG0RJ9YzWs=`
+
+ TestPrivKey3 = `lQOXBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj
+6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4
+Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH
+CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy
+resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAEAB/dQbElFIa0VklZa
+39ZLhtbBxACSWH3ql3EtRZaB2Mh4zSALbFyJDQfScOy8AZHmv66Ozxit9X9WsYr9OzcHujgl/2da
+A3lybF6iLw1YDNaL11G6kuyn5sFP6lYGMRGOIWSik9oSVF6slo8m8ujRLdBsdMXVcElHKzCJiWmt
+JZHEnUkl9X96fIPajMBfWjHHwcaeMOc77nvjwqy5wC4EY8TSVYzxeZHL7DADQ0EHBcThlmfizpCq
+26LMVb6ju8STH7uDDFyKmhr/hC2vOkt+PKsvBCmW8/ESanO1zKPD9cvSsOWr2rZWNnkDRftqzOU5
+OCrI+3o9E74+toNb07bPntEEAMEStOzSvqZ6NKdh7EZYPA4mkkFC+EiHYIoinP1sd9V8O2Hq+dzx
+yFHtWu0LmP6uWXk45vsP9y1UMJcEa33ew5JJa7zgucI772/BNvd/Oys/PqwIAl6uNIY8uYLgmn4L
+1IPatp7vDiXzZSivPZd4yN4S4zCypZp9cnpO3qv8q7CtBADW87IA0TabdoxiN+m4XL7sYDRIfglr
+MRPAlfrkAUaGDBx/t1xb6IaKk7giFdwHpTI6+g9XNkqKqogMe4Fp+nsd1xtfsNUBn6iKZavm5kXe
+Lp9QgE+K6mvIreOTe2PKQqXqgPRG6+SRGatoKeY76fIpd8AxOJyWERxcq2lUHLn45QP/UXDTcYB7
+gzJtZrfpXN0GqQ0lYXMzbQfLnkUsu3mYzArfNy0otzEmKTkwmKclNY1/EJSzSdHfgmeA260a0nLK
+64C0wPgSmOqw90qwi5odAYSjSFBapDbyGF86JpHrLxyEEpGoXanRPwWfbiWp19Nwg6nknA87AtaM
+3+AHjbWzwCpHL7QQVmF1bHQgVGVzdCBLZXkgM4kBOAQTAQIAIgUCVduSIwIbLwYLCQgHAwIGFQgC
+CQoLBBYCAwECHgECF4AACgkQ9HlLVvwtxt1aMQf/aaGoL1rRWTUjM6DEShXFhWpV29rEjSdNk5N+
+ZwVifgdCVD5IsSjI1Z7mO2SHHiTm4eKnHAofM6/TZgzXg1YLpu8rDYJARMsM8bgK/xgxSamGjm2c
+wN220jOnwePIlG0drNTW5N6zb/K6qHoscJ6NUkjS5JPdGJuq7B0bdCM8/xSbG75gL34U5bYqK38B
+DwmW4UMl2rf/BJfxV9hmsZ2Cat4TspgyiWEKTMZI+PugXKDDwuoqgm+320K4EqFkwG4y/WwHkKgk
+hZ0+io5lzhTsvVd2p8q8VlH9GG5eA3WWQj0yqucsOmKQvcuT5y0vFY6NQJbyuioqgdlgEXtc+p0B
++Z0DmARV25IjAQgA49yN3hCBsuWoiTezoE9FHJXOCVOBR1/4jStQPJtoMl8mhtl3xTp7iGQ+9GhD
+y0l5+fP+qcP/rfBq0BslhxVOZ7jQjdUoM6ZUZzJoPGIo/V2KwqpwQl3tdCIjvagCJeYQfTL7lTCc
+4ySz+XBoAYMwZVGMcRcjp+JE8Wx9Ovzuq8wnelbU6I5dVJ7O4E1OWbIkLuytDX+fDEvfft6/oPXN
+Bl3cm6FzEuQetQQss3DOG9xnvS+DrjmMCbPwR2a++ioQ8+geoqA/kB4cAI6xOb3ncoeGDHc1i4Y9
+T9Ggi+6Aq3girmfDtNYVOM8cZUXcZNCvLkJn8DNeIvnuFUSEO+a5PwARAQABAAf/TPd98CmRNdV/
+VUI8aYT9Kkervdi4DVzsfvrHcoFn88PSJrCkVTmI6qw526Kwa6VZD0YMmll7LszLt5nD1lorDrwN
+rir3FmMzlVwge20IvXRwX4rkunYxtA2oFvL+LsEEhtXGx0ERbWRDapk+eGxQ15hxIO4Y/Cdg9E+a
+CWfQUrTSnC6qMVfVYMGfnM1yNX3OWattEFfmxQas5XqQk/0FgjCZALixdanjN/r1tjp5/2MiSD8N
+Wkemzsr6yPicnc3+BOZc5YOOnH8FqBvVHcDlSJI6pCOCEiO3Pq2QEk/1evONulbF116mLnQoGrpp
+W77l+5O42VUpZfjROCPd5DYyMQQA492CFXZpDIJ2emB9/nK8X6IzdVRK3oof8btbSNnme5afIzhs
+wR1ruX30O7ThfB+5ezpbgK1C988CWkr9SNSTy43omarafGig6/Y1RzdiITILuIGfbChoSpc70jXx
+U0nzJ/1i9yZ/vDgP3EC2miRhlDcp5w0Bu0oMBlgG/1uhj0cEAP/+7aFGP0fo2MZPhyl5feHKWj4k
+85XoAIpMBnzF6HTGU3ljAE56a+4sVw3bWB755DPhvpZvDkX60I9iIJxio8TK5ITdfjlLhxuskXyt
+ycwWI/4J+soeq4meoxK9jxZJuDl/qvoGfyzNg1oy2OBehX8+6erW46kr6Z/MQutS3zJJBACmJHrK
+VR40qD7a8KbvfuM3ruwlm5JqT/Ykq1gfKKxHjWDIUIeyBX/axGQvAGNYeuuQCzZ0+QsEWur3C4kN
+U+Pb5K1WGyOKkhJzivSI56AG3d8TA/Q0JhqST6maY0fvUoahWSCcpd7MULa3n1zx5Wsvi8mkVtup
+Js/IDi/kqneqM0XviQI+BBgBAgAJBQJV25IjAhsuASkJEPR5S1b8LcbdwF0gBBkBAgAGBQJV25Ij
+AAoJEAUj/03Hcrkg84UIAKxn9nizYtwSgDnVNb5PnD5h6+Ui6r7ffYm2o0im4YhakbFTHIPI9PRh
+BavRI5sE5Fg2vtE/x38jattoUrJoNoq9Gh9iv5PBfL3amEGjul0RRqYGl+ub+yv7YGAAHbHcdZen
+4gx15VWGpB7y3hycWbdzV8h3EAPKIm5XmB7YyXmArnI3CoJA+HtTZGoL6WZWUwka9YichGfaZ/oD
+umENg1l87Pp2RqvjLKHmv2tGCtnDzyv/IiWur9zopFQiCc8ysVgRq6CA5x5nzbv6MqRspYUS4e2I
+LFbuREA3blR+caw9oX41IYzarW8IbgeIXJ3HqUyhczRKF/z5nDKtX/kHMCqlbAgAnfu0TALnwVuj
+KeXLo4Y7OA9LTEqfORcw62q5OjSoQf/VsRSwGSefv3kGZk5N/igELluU3qpG/twZI/TSL6zGqXU2
+FOMlyMm1849TOB9b4B//4dHrjzPhztzowKMMUqeTxmSgYtFTshKN6eQ0XO+7ZuOXEmSKXS4kOUs9
+ttfzSiPNXUZL2D5nFU9H7rw3VAuXYVTrOx+Dfi6mYsscbxUbi8THODI2Q7B9Ni92DJE1OOe4+57o
+fXZ9ln24I14bna/uVHd6hBwLEE6eLCCKkHxQnnZFZduXDHMK0a0OL8RYHfMtNSem4pyC5wDQui1u
+KFIzGEPKVoBF9U7VBXpyxpsz+A==`
+
+ TestPubKey1 = `mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da
+rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/
+063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f
+sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg
+8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3Qg
+S2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOfLr44B
+HbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRTJfjECi+AuTGeDwBy84TD
+cRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3CEe8cMwIPqPT2kajJVdOyrvkyuFOdPFOE
+A7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlB
+C0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXa
+QKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7oEDCn
+aY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I1Ktm698UAZS9Jt8y
+jak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb
+6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5N
+ZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu
+9p315E87DOleYwxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ
+AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYu
+lEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHN
+C1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0
+YwKoz3h9+QEcZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJi
+oPn2jVMnXCm4EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH
+/AtY+XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcI
+PXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O
+9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx
+8iDV+dNtDVKfPRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKd
+OIu60YPNE4+h7u2CfYyFPu3AlUaGNMBlvy6PEpU=`
+
+ TestPubKey2 = `mQENBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG
+Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4
+0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e
+Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk
+Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAG0EFZhdWx0IFRlc3Qg
+S2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOuDLGfr
+XolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHipZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO
+2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABqhb5ojexdnAYRswaHV201ZCclj9rnJN1P
+Ag0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmGkdrg8K8ARmRILjmwuBAgJM0eXBZHNGWX
+elk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0vDttB+ZXqF88W9jAYlvdgbTtajNF5IDY
+DjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlxk4e5AQ0EVduQkQEIAOjZV5tbpfIh5Qef
+pIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg
++YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/tGF5xE3e5CoZRsHV/c92h3t1LdJNOnC5m
+UKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBHyt0tdHtIWuQv6joTJzujqViRhlCwQYzQ
+SKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1rENO8JOuPu6tMS+znFu67skq2gFFZwCQW
+IjdHm+2ukE+PE580WAWudyMAEQEAAYkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZ
+AQIABgUCVduQkQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDD
+hnV3bXQsCvn/6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQ
+e3l4CqJvkn6jybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4K
+BIrp/bhG6PdnigKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eY
+ENtyOmEMWOFCLLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H
+/1trYUtJjXQKHmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7
+PkUZTfpaP/L6DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0U
+PEnjvtZTp5yOhTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQd
+w/2epIewH0L/FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4
+MFOMVRn1dc3qdXlg3mimA+iK7tABQfG0RJ9YzWs=`
+
+ TestPubKey3 = `mQENBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj
+6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4
+Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH
+CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy
+resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAG0EFZhdWx0IFRlc3Qg
+S2V5IDOJATgEEwECACIFAlXbkiMCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEPR5S1b8
+LcbdWjEH/2mhqC9a0Vk1IzOgxEoVxYVqVdvaxI0nTZOTfmcFYn4HQlQ+SLEoyNWe5jtkhx4k5uHi
+pxwKHzOv02YM14NWC6bvKw2CQETLDPG4Cv8YMUmpho5tnMDdttIzp8HjyJRtHazU1uTes2/yuqh6
+LHCejVJI0uST3RibquwdG3QjPP8Umxu+YC9+FOW2Kit/AQ8JluFDJdq3/wSX8VfYZrGdgmreE7KY
+MolhCkzGSPj7oFygw8LqKoJvt9tCuBKhZMBuMv1sB5CoJIWdPoqOZc4U7L1XdqfKvFZR/RhuXgN1
+lkI9MqrnLDpikL3Lk+ctLxWOjUCW8roqKoHZYBF7XPqdAfm5AQ0EVduSIwEIAOPcjd4QgbLlqIk3
+s6BPRRyVzglTgUdf+I0rUDybaDJfJobZd8U6e4hkPvRoQ8tJefnz/qnD/63watAbJYcVTme40I3V
+KDOmVGcyaDxiKP1disKqcEJd7XQiI72oAiXmEH0y+5UwnOMks/lwaAGDMGVRjHEXI6fiRPFsfTr8
+7qvMJ3pW1OiOXVSezuBNTlmyJC7srQ1/nwxL337ev6D1zQZd3JuhcxLkHrUELLNwzhvcZ70vg645
+jAmz8EdmvvoqEPPoHqKgP5AeHACOsTm953KHhgx3NYuGPU/RoIvugKt4Iq5nw7TWFTjPHGVF3GTQ
+ry5CZ/AzXiL57hVEhDvmuT8AEQEAAYkCPgQYAQIACQUCVduSIwIbLgEpCRD0eUtW/C3G3cBdIAQZ
+AQIABgUCVduSIwAKCRAFI/9Nx3K5IPOFCACsZ/Z4s2LcEoA51TW+T5w+YevlIuq+332JtqNIpuGI
+WpGxUxyDyPT0YQWr0SObBORYNr7RP8d/I2rbaFKyaDaKvRofYr+TwXy92phBo7pdEUamBpfrm/sr
++2BgAB2x3HWXp+IMdeVVhqQe8t4cnFm3c1fIdxADyiJuV5ge2Ml5gK5yNwqCQPh7U2RqC+lmVlMJ
+GvWInIRn2mf6A7phDYNZfOz6dkar4yyh5r9rRgrZw88r/yIlrq/c6KRUIgnPMrFYEauggOceZ827
++jKkbKWFEuHtiCxW7kRAN25UfnGsPaF+NSGM2q1vCG4HiFydx6lMoXM0Shf8+ZwyrV/5BzAqpWwI
+AJ37tEwC58Fboynly6OGOzgPS0xKnzkXMOtquTo0qEH/1bEUsBknn795BmZOTf4oBC5blN6qRv7c
+GSP00i+sxql1NhTjJcjJtfOPUzgfW+Af/+HR648z4c7c6MCjDFKnk8ZkoGLRU7ISjenkNFzvu2bj
+lxJkil0uJDlLPbbX80ojzV1GS9g+ZxVPR+68N1QLl2FU6zsfg34upmLLHG8VG4vExzgyNkOwfTYv
+dgyRNTjnuPue6H12fZZ9uCNeG52v7lR3eoQcCxBOniwgipB8UJ52RWXblwxzCtGtDi/EWB3zLTUn
+puKcgucA0LotbihSMxhDylaARfVO1QV6csabM/g=`
+
+ TestAAPubKey1 = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzz
+wiMwBS5cD0darGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7
+H+/mhfFvKmgr0Y5kDCF1j0T/063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX
+1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0fsF5St9jhO7mbZU9EFkv9O3t3EaUR
+fHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg8hQssKeVGpuskTdz
+5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3QgS2V5
+IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJ
+EOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRT
+JfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C
+Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1Z
+mumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4z
+J2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+
+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7o
+EDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I
+1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okj
+h5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTj
+OleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2o
+P/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOle
+Ywxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ
+AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVh
+EGipBmpDGRYulEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHk
+GRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRd
+tPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEcZHvsjSZjgydKvfLY
+cm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4EKc7
+fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY
++XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7
+moViAAcIPXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWko
+jHqyob3cyLgy6z9Q557O9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJ
+iEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKfPRENiLOOc19MmS+phmUy
+rbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h7u2CfYyF
+Pu3AlUaGNMBlvy6PEpU=
+=NUTS
+-----END PGP PUBLIC KEY BLOCK-----`
+)
diff --git a/vendor/github.com/hashicorp/vault/helper/policies/policies.go b/vendor/github.com/hashicorp/vault/helper/policies/policies.go
new file mode 100644
index 0000000..1e25522
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/policies/policies.go
@@ -0,0 +1,57 @@
+package policies
+
+import "sort"
+
+// EquivalentPolicies checks whether the given policy sets are equivalent, as
+// in, they contain the same values. The benefit of this method is that it
+// leaves the "default" policy out of its comparisons, as it may be added
+// later by core after a set of policies has been saved by a backend.
+func EquivalentPolicies(a, b []string) bool {
+ if a == nil && b == nil {
+ return true
+ }
+
+ if a == nil || b == nil {
+ return false
+ }
+
+ // First we'll build maps to ensure unique values and filter default
+ mapA := map[string]bool{}
+ mapB := map[string]bool{}
+ for _, keyA := range a {
+ if keyA == "default" {
+ continue
+ }
+ mapA[keyA] = true
+ }
+ for _, keyB := range b {
+ if keyB == "default" {
+ continue
+ }
+ mapB[keyB] = true
+ }
+
+ // Now we'll build our checking slices
+ var sortedA, sortedB []string
+ for keyA := range mapA {
+ sortedA = append(sortedA, keyA)
+ }
+ for keyB := range mapB {
+ sortedB = append(sortedB, keyB)
+ }
+ sort.Strings(sortedA)
+ sort.Strings(sortedB)
+
+ // Finally, compare
+ if len(sortedA) != len(sortedB) {
+ return false
+ }
+
+ for i := range sortedA {
+ if sortedA[i] != sortedB[i] {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/policies/policies_test.go b/vendor/github.com/hashicorp/vault/helper/policies/policies_test.go
new file mode 100644
index 0000000..ba9b0a8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/policies/policies_test.go
@@ -0,0 +1,26 @@
+package policies
+
+import "testing"
+
+func TestEquivalentPolicies(t *testing.T) {
+ a := []string{"foo", "bar"}
+ var b []string
+ if EquivalentPolicies(a, b) {
+ t.Fatal("bad")
+ }
+
+ b = []string{"foo"}
+ if EquivalentPolicies(a, b) {
+ t.Fatal("bad")
+ }
+
+ b = []string{"bar", "foo"}
+ if !EquivalentPolicies(a, b) {
+ t.Fatal("bad")
+ }
+
+ b = []string{"foo", "default", "bar"}
+ if !EquivalentPolicies(a, b) {
+ t.Fatal("bad")
+ }
+}
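+
+// Illustrative sketch, not part of upstream Vault: EquivalentPolicies treats
+// only a nil slice specially before filtering, so a non-nil slice holding
+// nothing but "default" still compares unequal to nil. Test name and inputs
+// are assumed for illustration.
+func TestEquivalentPolicies_DefaultOnly(t *testing.T) {
+ // nil vs nil is equivalent by definition.
+ if !EquivalentPolicies(nil, nil) {
+ t.Fatal("bad: nil slices should be equivalent")
+ }
+
+ // The nil check runs before "default" is filtered out, so this is a
+ // mismatch even though the non-nil slice filters down to empty.
+ if EquivalentPolicies([]string{"default"}, nil) {
+ t.Fatal("bad: non-nil vs nil is never equivalent")
+ }
+
+ // Two non-nil slices that both filter down to empty are equivalent.
+ if !EquivalentPolicies([]string{"default"}, []string{}) {
+ t.Fatal("bad: default-only and empty should be equivalent")
+ }
+}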
diff --git a/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go b/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go
new file mode 100644
index 0000000..9ac9b93
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go
@@ -0,0 +1,119 @@
+package policyutil
+
+import (
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/strutil"
+)
+
+const (
+ AddDefaultPolicy = true
+ DoNotAddDefaultPolicy = false
+)
+
+// ParsePolicies parses a comma-delimited list of policies.
+// The resulting collection will have no duplicate elements.
+// If the 'root' policy is present in the list of policies,
+// all other policies are ignored and the result contains
+// just 'root'. In cases where 'root' is not present, the
+// 'default' policy is added if it is not already present.
+func ParsePolicies(policiesRaw string) []string {
+ if policiesRaw == "" {
+ return []string{"default"}
+ }
+
+ policies := strings.Split(policiesRaw, ",")
+
+ return SanitizePolicies(policies, true)
+}
+
+// SanitizePolicies performs the common input validation tasks
+// which are performed on the list of policies across Vault.
+// The resulting collection will have no duplicate elements.
+// If the 'root' policy is present in the list of policies,
+// all other policies are ignored and the result contains
+// just 'root'. In cases where 'root' is not present and the
+// 'default' policy is not already present, 'default' is
+// added if addDefault is set to true.
+func SanitizePolicies(policies []string, addDefault bool) []string {
+ defaultFound := false
+ for i, p := range policies {
+ policies[i] = strings.ToLower(strings.TrimSpace(p))
+ // Skip unnamed policies here; empty entries are removed by RemoveDuplicates below.
+ if policies[i] == "" {
+ continue
+ }
+
+ // If 'root' policy is present, ignore all other policies.
+ if policies[i] == "root" {
+ policies = []string{"root"}
+ defaultFound = true
+ break
+ }
+ if policies[i] == "default" {
+ defaultFound = true
+ }
+ }
+
+ // Add 'default' if not already present, unless the policies contain 'root'.
+ if addDefault && (len(policies) == 0 || !defaultFound) {
+ policies = append(policies, "default")
+ }
+
+ return strutil.RemoveDuplicates(policies, true)
+}
+
+// EquivalentPolicies checks whether the given policy sets are equivalent, as in,
+// they contain the same values. The benefit of this method is that it leaves
+// the "default" policy out of its comparisons as it may be added later by core
+// after a set of policies has been saved by a backend.
+func EquivalentPolicies(a, b []string) bool {
+ if a == nil && b == nil {
+ return true
+ }
+
+ if a == nil || b == nil {
+ return false
+ }
+
+ // First we'll build maps to ensure unique values and filter default
+ mapA := map[string]bool{}
+ mapB := map[string]bool{}
+ for _, keyA := range a {
+ if keyA == "default" {
+ continue
+ }
+ mapA[keyA] = true
+ }
+ for _, keyB := range b {
+ if keyB == "default" {
+ continue
+ }
+ mapB[keyB] = true
+ }
+
+ // Now we'll build our checking slices
+ var sortedA, sortedB []string
+ for keyA := range mapA {
+ sortedA = append(sortedA, keyA)
+ }
+ for keyB := range mapB {
+ sortedB = append(sortedB, keyB)
+ }
+ sort.Strings(sortedA)
+ sort.Strings(sortedB)
+
+ // Finally, compare
+ if len(sortedA) != len(sortedB) {
+ return false
+ }
+
+ for i := range sortedA {
+ if sortedA[i] != sortedB[i] {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil_test.go b/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil_test.go
new file mode 100644
index 0000000..4b26483
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil_test.go
@@ -0,0 +1,76 @@
+package policyutil
+
+import "testing"
+
+func TestSanitizePolicies(t *testing.T) {
+ expected := []string{"foo", "bar"}
+ actual := SanitizePolicies([]string{"foo", "bar"}, false)
+ if !EquivalentPolicies(expected, actual) {
+ t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
+ }
+
+ // If 'default' is already added, do not remove it.
+ expected = []string{"foo", "bar", "default"}
+ actual = SanitizePolicies([]string{"foo", "bar", "default"}, false)
+ if !EquivalentPolicies(expected, actual) {
+ t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
+ }
+}
+
+func TestParsePolicies(t *testing.T) {
+ expected := []string{"foo", "bar", "default"}
+ // 'default' should be added if not present.
+ actual := ParsePolicies("foo,bar")
+ if !EquivalentPolicies(expected, actual) {
+ t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
+ }
+
+ // do not add default more than once.
+ actual = ParsePolicies("foo,bar,default")
+ if !EquivalentPolicies(expected, actual) {
+ t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
+ }
+
+ // handle spaces and tabs.
+ actual = ParsePolicies(" foo , bar , default")
+ if !EquivalentPolicies(expected, actual) {
+ t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
+ }
+
+ // ignore all others if root is present.
+ expected = []string{"root"}
+ actual = ParsePolicies("foo,bar,root")
+ if !EquivalentPolicies(expected, actual) {
+ t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
+ }
+
+ // with spaces and tabs.
+ expected = []string{"root"}
+ actual = ParsePolicies("foo ,bar, root ")
+ if !EquivalentPolicies(expected, actual) {
+ t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
+ }
+}
+
+func TestEquivalentPolicies(t *testing.T) {
+ a := []string{"foo", "bar"}
+ var b []string
+ if EquivalentPolicies(a, b) {
+ t.Fatal("bad")
+ }
+
+ b = []string{"foo"}
+ if EquivalentPolicies(a, b) {
+ t.Fatal("bad")
+ }
+
+ b = []string{"bar", "foo"}
+ if !EquivalentPolicies(a, b) {
+ t.Fatal("bad")
+ }
+
+ b = []string{"foo", "default", "bar"}
+ if !EquivalentPolicies(a, b) {
+ t.Fatal("bad")
+ }
+}
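+
+// Illustrative sketch, not part of upstream Vault: shows how the
+// AddDefaultPolicy constant reads at a call site and how 'root'
+// short-circuits sanitization. Test name and inputs are assumed for
+// illustration.
+func TestSanitizePolicies_Root(t *testing.T) {
+ // With addDefault set, input is trimmed, lowercased, and 'default'
+ // is appended when missing.
+ expected := []string{"bar", "default", "foo"}
+ actual := SanitizePolicies([]string{"Foo", " bar "}, AddDefaultPolicy)
+ if !EquivalentPolicies(expected, actual) {
+ t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
+ }
+
+ // 'root' wins over everything else and suppresses 'default'.
+ expected = []string{"root"}
+ actual = SanitizePolicies([]string{"foo", "root"}, AddDefaultPolicy)
+ if !EquivalentPolicies(expected, actual) {
+ t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
+ }
+}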
diff --git a/vendor/github.com/hashicorp/vault/helper/salt/salt.go b/vendor/github.com/hashicorp/vault/helper/salt/salt.go
new file mode 100644
index 0000000..3dba9eb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/salt/salt.go
@@ -0,0 +1,170 @@
+package salt
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "hash"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/logical"
+)
+
+const (
+ // DefaultLocation is the path in the view we store our key salt
+ // if no other path is provided.
+ DefaultLocation = "salt"
+)
+
+// Salt is used to manage a persistent salt key which is used to
+// hash values. This allows keys to be generated and recovered
+// using the global salt. Primarily, this allows paths in the storage
+// backend to be obfuscated if they may contain sensitive information.
+type Salt struct {
+ config *Config
+ salt string
+ generated bool
+}
+
+type HashFunc func([]byte) []byte
+
+// Config is used to parameterize the Salt
+type Config struct {
+ // Location is the path in the storage backend for the
+ // salt. Uses DefaultLocation if not specified.
+ Location string
+
+ // HashFunc is the hashing function to use for salting.
+ // Defaults to SHA1 if not provided.
+ HashFunc HashFunc
+
+ // HMAC allows specification of a hash function to use for
+ // the HMAC helpers
+ HMAC func() hash.Hash
+
+ // HMACType is the string prepended to HMAC values for identification.
+ // Required if HMAC is set.
+ HMACType string
+}
+
+// NewSalt creates a new salt based on the configuration
+func NewSalt(view logical.Storage, config *Config) (*Salt, error) {
+ // Setup the configuration
+ if config == nil {
+ config = &Config{}
+ }
+ if config.Location == "" {
+ config.Location = DefaultLocation
+ }
+ if config.HashFunc == nil {
+ config.HashFunc = SHA256Hash
+ }
+ if config.HMAC == nil {
+ config.HMAC = sha256.New
+ config.HMACType = "hmac-sha256"
+ }
+
+ // Create the salt
+ s := &Salt{
+ config: config,
+ }
+
+ // Look for the salt
+ var raw *logical.StorageEntry
+ var err error
+ if view != nil {
+ raw, err = view.Get(config.Location)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read salt: %v", err)
+ }
+ }
+
+ // Restore the salt if it exists
+ if raw != nil {
+ s.salt = string(raw.Value)
+ }
+
+ // Generate a new salt if necessary
+ if s.salt == "" {
+ s.salt, err = uuid.GenerateUUID()
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate uuid: %v", err)
+ }
+ s.generated = true
+ if view != nil {
+ raw := &logical.StorageEntry{
+ Key: config.Location,
+ Value: []byte(s.salt),
+ }
+ if err := view.Put(raw); err != nil {
+ return nil, fmt.Errorf("failed to persist salt: %v", err)
+ }
+ }
+ }
+
+ if config.HMAC != nil {
+ if len(config.HMACType) == 0 {
+ return nil, fmt.Errorf("HMACType must be defined")
+ }
+ }
+
+ return s, nil
+}
+
+// SaltID is used to apply a salt and hash function to an ID to make sure
+// it is not reversible
+func (s *Salt) SaltID(id string) string {
+ return SaltID(s.salt, id, s.config.HashFunc)
+}
+
+// GetHMAC returns a hex-encoded HMAC of the given data, keyed with the
+// salt, so that the value is not reversible
+func (s *Salt) GetHMAC(data string) string {
+ hm := hmac.New(s.config.HMAC, []byte(s.salt))
+ hm.Write([]byte(data))
+ return hex.EncodeToString(hm.Sum(nil))
+}
+
+// GetIdentifiedHMAC is like GetHMAC, but prepends the configured HMACType
+// identifier to the result
+func (s *Salt) GetIdentifiedHMAC(data string) string {
+ return s.config.HMACType + ":" + s.GetHMAC(data)
+}
+
+// DidGenerate reports whether the underlying salt value was generated
+// on initialization rather than loaded from an existing stored value
+func (s *Salt) DidGenerate() bool {
+ return s.generated
+}
+
+// SaltID is used to apply a salt and hash function to an ID to make sure
+// it is not reversible
+func SaltID(salt, id string, hash HashFunc) string {
+ comb := salt + id
+ hashVal := hash([]byte(comb))
+ return hex.EncodeToString(hashVal)
+}
+
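+// HMACValue returns a hex-encoded HMAC of val, keyed with the given salt and
+// using the given hash constructor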
+func HMACValue(salt, val string, hashFunc func() hash.Hash) string {
+ hm := hmac.New(hashFunc, []byte(salt))
+ hm.Write([]byte(val))
+ return hex.EncodeToString(hm.Sum(nil))
+}
+
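+// HMACIdentifiedValue is like HMACValue, but prepends the given hmacType
+// identifier and a colon to the result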
+func HMACIdentifiedValue(salt, val, hmacType string, hashFunc func() hash.Hash) string {
+ return hmacType + ":" + HMACValue(salt, val, hashFunc)
+}
+
+// SHA1Hash returns the SHA1 of the input
+func SHA1Hash(inp []byte) []byte {
+ hashed := sha1.Sum(inp)
+ return hashed[:]
+}
+
+// SHA256Hash returns the SHA256 of the input
+func SHA256Hash(inp []byte) []byte {
+ hashed := sha256.Sum256(inp)
+ return hashed[:]
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/salt/salt_test.go b/vendor/github.com/hashicorp/vault/helper/salt/salt_test.go
new file mode 100644
index 0000000..124e66c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/salt/salt_test.go
@@ -0,0 +1,87 @@
+package salt
+
+import (
+ "crypto/sha1"
+ "crypto/sha256"
+ "testing"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestSalt(t *testing.T) {
+ inm := &logical.InmemStorage{}
+ conf := &Config{}
+
+ salt, err := NewSalt(inm, conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if !salt.DidGenerate() {
+ t.Fatalf("expected generation")
+ }
+
+ // Verify the salt exists
+ out, err := inm.Get(DefaultLocation)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("missing salt")
+ }
+
+ // Create a new salt, should restore
+ salt2, err := NewSalt(inm, conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if salt2.DidGenerate() {
+ t.Fatalf("unexpected generation")
+ }
+
+ // Check for a match
+ if salt.salt != salt2.salt {
+ t.Fatalf("salt mismatch: %s %s", salt.salt, salt2.salt)
+ }
+
+ // Verify a match
+ id := "foobarbaz"
+ sid1 := salt.SaltID(id)
+ sid2 := salt2.SaltID(id)
+
+ if sid1 != sid2 {
+ t.Fatalf("mismatch")
+ }
+}
+
+func TestSaltID(t *testing.T) {
+ salt, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ id := "foobarbaz"
+
+ sid1 := SaltID(salt, id, SHA1Hash)
+ sid2 := SaltID(salt, id, SHA1Hash)
+
+ if len(sid1) != sha1.Size*2 {
+ t.Fatalf("Bad len: %d %s", len(sid1), sid1)
+ }
+
+ if sid1 != sid2 {
+ t.Fatalf("mismatch")
+ }
+
+ sid1 = SaltID(salt, id, SHA256Hash)
+ sid2 = SaltID(salt, id, SHA256Hash)
+
+ if len(sid1) != sha256.Size*2 {
+ t.Fatalf("Bad len: %d", len(sid1))
+ }
+
+ if sid1 != sid2 {
+ t.Fatalf("mismatch")
+ }
+}
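+
+// Illustrative sketch, not part of upstream Vault: under the default config
+// GetIdentifiedHMAC emits "hmac-sha256:<hex>", where the hex digest is
+// sha256.Size*2 characters long. Test name and input are assumed for
+// illustration.
+func TestSalt_GetIdentifiedHMAC(t *testing.T) {
+ inm := &logical.InmemStorage{}
+ salt, err := NewSalt(inm, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ out := salt.GetIdentifiedHMAC("foobarbaz")
+ prefix := "hmac-sha256:"
+ if len(out) != len(prefix)+sha256.Size*2 {
+ t.Fatalf("bad length: %d (%s)", len(out), out)
+ }
+ if out[:len(prefix)] != prefix {
+ t.Fatalf("bad prefix: %s", out)
+ }
+
+ // Same salt and input must produce the same HMAC.
+ if out != salt.GetIdentifiedHMAC("foobarbaz") {
+ t.Fatalf("mismatch")
+ }
+}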
diff --git a/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go b/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go
new file mode 100644
index 0000000..7c7f64d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go
@@ -0,0 +1,288 @@
+package strutil
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// StrListContains looks for a string in a list of strings.
+func StrListContains(haystack []string, needle string) bool {
+ for _, item := range haystack {
+ if item == needle {
+ return true
+ }
+ }
+ return false
+}
+
+// StrListSubset checks whether every item in sub is
+// also present in super
+func StrListSubset(super, sub []string) bool {
+ for _, item := range sub {
+ if !StrListContains(super, item) {
+ return false
+ }
+ }
+ return true
+}
+
+// ParseDedupLowercaseAndSortStrings parses a `sep`-separated list of strings
+// into a slice of strings. The returned slice is sorted and contains no
+// duplicate or empty items; all values are converted to lower case.
+func ParseDedupLowercaseAndSortStrings(input string, sep string) []string {
+ input = strings.TrimSpace(input)
+ parsed := []string{}
+ if input == "" {
+ // Don't return nil
+ return parsed
+ }
+ return RemoveDuplicates(strings.Split(input, sep), true)
+}
+
+// ParseKeyValues parses a `sep`-separated list of `<key>=<value>` tuples
+// into a map[string]string.
+func ParseKeyValues(input string, out map[string]string, sep string) error {
+ if out == nil {
+ return fmt.Errorf("'out' is nil")
+ }
+
+ keyValues := ParseDedupLowercaseAndSortStrings(input, sep)
+ if len(keyValues) == 0 {
+ return nil
+ }
+
+ for _, keyValue := range keyValues {
+ shards := strings.Split(keyValue, "=")
+ if len(shards) < 2 {
+ return fmt.Errorf("invalid <key>=<value> pair: '%s'", keyValue)
+ }
+ key := strings.TrimSpace(shards[0])
+ value := strings.TrimSpace(shards[1])
+ if key == "" || value == "" {
+ return fmt.Errorf("invalid pair: key:'%s' value:'%s'", key, value)
+ }
+ out[key] = value
+ }
+ return nil
+}
+
+// ParseArbitraryKeyValues parses arbitrary `<key>=<value>` tuples. The input
+// can be one of the following:
+// * JSON string
+// * Base64 encoded JSON string
+// * `sep`-separated list of `<key>=<value>` pairs
+// * Base64 encoded string containing a `sep`-separated list of
+// `<key>=<value>` pairs
+//
+// Input will be parsed into the output parameter, which should
+// be a non-nil map[string]string.
+func ParseArbitraryKeyValues(input string, out map[string]string, sep string) error {
+ input = strings.TrimSpace(input)
+ if input == "" {
+ return nil
+ }
+ if out == nil {
+ return fmt.Errorf("'out' is nil")
+ }
+
+ // Try to base64 decode the input. If successful, consider the decoded
+ // value as input.
+ inputBytes, err := base64.StdEncoding.DecodeString(input)
+ if err == nil {
+ input = string(inputBytes)
+ }
+
+ // Try to JSON unmarshal the input. If successful, consider that the
+ // metadata was supplied as JSON input.
+ err = json.Unmarshal([]byte(input), &out)
+ if err != nil {
+ // If JSON unmarshalling fails, consider that the input was
+ // supplied as a comma separated string of 'key=value' pairs.
+ if err = ParseKeyValues(input, out, sep); err != nil {
+ return fmt.Errorf("failed to parse the input: %v", err)
+ }
+ }
+
+ // Validate the parsed input
+ for key, value := range out {
+ if key != "" && value == "" {
+ return fmt.Errorf("invalid value for key '%s'", key)
+ }
+ }
+
+ return nil
+}
+
+// ParseStringSlice parses a `sep`-separated list of strings into a
+// []string.
+//
+// The output will always be a valid slice but may be of length zero.
+func ParseStringSlice(input string, sep string) []string {
+ input = strings.TrimSpace(input)
+ if input == "" {
+ return []string{}
+ }
+
+ // strings.Split already returns a fresh slice, so no copy is needed.
+ return strings.Split(input, sep)
+}
+
+// ParseArbitraryStringSlice parses an arbitrary string slice. The input can
+// be one of the following:
+// * JSON string
+// * Base64 encoded JSON string
+// * `sep`-separated list of values
+// * Base64-encoded string containing a `sep`-separated list of values
+//
+// Note that the separator is ignored if the input is found to already be in a
+// structured format (e.g., JSON).
+//
+// The output will always be a valid slice but may be of length zero.
+func ParseArbitraryStringSlice(input string, sep string) []string {
+ input = strings.TrimSpace(input)
+ if input == "" {
+ return []string{}
+ }
+
+ // Try to base64 decode the input. If successful, consider the decoded
+ // value as input.
+ inputBytes, err := base64.StdEncoding.DecodeString(input)
+ if err == nil {
+ input = string(inputBytes)
+ }
+
+ ret := []string{}
+
+ // Try to JSON unmarshal the input. If successful, consider that the
+ // metadata was supplied as JSON input.
+ err = json.Unmarshal([]byte(input), &ret)
+ if err != nil {
+ // If JSON unmarshalling fails, consider that the input was
+ // supplied as a separated string of values.
+ return ParseStringSlice(input, sep)
+ }
+
+ if ret == nil {
+ return []string{}
+ }
+
+ return ret
+}
+
+// TrimStrings takes a slice of strings and returns a slice of strings
+// with trimmed spaces
+func TrimStrings(items []string) []string {
+ ret := make([]string, len(items))
+ for i, item := range items {
+ ret[i] = strings.TrimSpace(item)
+ }
+ return ret
+}
+
+// RemoveDuplicates removes duplicate and empty elements from a slice of
+// strings. If lowercase is true, it also converts the items to lower case.
+// The returned slice is sorted.
+func RemoveDuplicates(items []string, lowercase bool) []string {
+ itemsMap := map[string]bool{}
+ for _, item := range items {
+ item = strings.TrimSpace(item)
+ if lowercase {
+ item = strings.ToLower(item)
+ }
+ if item == "" {
+ continue
+ }
+ itemsMap[item] = true
+ }
+ items = make([]string, 0, len(itemsMap))
+ for item := range itemsMap {
+ items = append(items, item)
+ }
+ sort.Strings(items)
+ return items
+}
+
+// EquivalentSlices checks whether the given string sets are equivalent, as in,
+// they contain the same values.
+func EquivalentSlices(a, b []string) bool {
+ if a == nil && b == nil {
+ return true
+ }
+
+ if a == nil || b == nil {
+ return false
+ }
+
+ // First we'll build maps to ensure unique values
+ mapA := map[string]bool{}
+ mapB := map[string]bool{}
+ for _, keyA := range a {
+ mapA[keyA] = true
+ }
+ for _, keyB := range b {
+ mapB[keyB] = true
+ }
+
+ // Now we'll build our checking slices
+ var sortedA, sortedB []string
+ for keyA := range mapA {
+ sortedA = append(sortedA, keyA)
+ }
+ for keyB := range mapB {
+ sortedB = append(sortedB, keyB)
+ }
+ sort.Strings(sortedA)
+ sort.Strings(sortedB)
+
+ // Finally, compare
+ if len(sortedA) != len(sortedB) {
+ return false
+ }
+
+ for i := range sortedA {
+ if sortedA[i] != sortedB[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// StrListDelete removes the first occurrence of the given item from the
+// slice of strings if the item exists.
+func StrListDelete(s []string, d string) []string {
+ if s == nil {
+ return s
+ }
+
+ for index, element := range s {
+ if element == d {
+ return append(s[:index], s[index+1:]...)
+ }
+ }
+
+ return s
+}
+
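+// GlobbedStringsMatch compares item to val, treating a leading and/or
+// trailing '*' in item as a wildcard on the corresponding end of val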
+func GlobbedStringsMatch(item, val string) bool {
+ if len(item) < 2 {
+ return val == item
+ }
+
+ hasPrefix := strings.HasPrefix(item, "*")
+ hasSuffix := strings.HasSuffix(item, "*")
+
+ if hasPrefix && hasSuffix {
+ return strings.Contains(val, item[1:len(item)-1])
+ } else if hasPrefix {
+ return strings.HasSuffix(val, item[1:])
+ } else if hasSuffix {
+ return strings.HasPrefix(val, item[:len(item)-1])
+ }
+
+ return val == item
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go b/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go
new file mode 100644
index 0000000..9fd3bef
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go
@@ -0,0 +1,326 @@
+package strutil
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "reflect"
+ "testing"
+)
+
+func TestStrUtil_StrListDelete(t *testing.T) {
+ output := StrListDelete([]string{"item1", "item2", "item3"}, "item1")
+ if StrListContains(output, "item1") {
+ t.Fatal("bad: 'item1' should not have been present")
+ }
+
+ output = StrListDelete([]string{"item1", "item2", "item3"}, "item2")
+ if StrListContains(output, "item2") {
+ t.Fatal("bad: 'item2' should not have been present")
+ }
+
+ output = StrListDelete([]string{"item1", "item2", "item3"}, "item3")
+ if StrListContains(output, "item3") {
+ t.Fatal("bad: 'item3' should not have been present")
+ }
+
+ output = StrListDelete([]string{"item1", "item1", "item3"}, "item1")
+ if !StrListContains(output, "item1") {
+ t.Fatal("bad: 'item1' should have been present")
+ }
+
+ output = StrListDelete(output, "item1")
+ if StrListContains(output, "item1") {
+ t.Fatal("bad: 'item1' should not have been present")
+ }
+
+ output = StrListDelete(output, "random")
+ if len(output) != 1 {
+ t.Fatalf("bad: expected: 1, actual: %d", len(output))
+ }
+
+ output = StrListDelete(output, "item3")
+ if StrListContains(output, "item3") {
+ t.Fatal("bad: 'item3' should not have been present")
+ }
+}
+
+func TestStrutil_EquivalentSlices(t *testing.T) {
+ slice1 := []string{"test2", "test1", "test3"}
+ slice2 := []string{"test3", "test2", "test1"}
+ if !EquivalentSlices(slice1, slice2) {
+ t.Fatalf("bad: expected a match")
+ }
+
+ slice2 = append(slice2, "test4")
+ if EquivalentSlices(slice1, slice2) {
+ t.Fatalf("bad: expected a mismatch")
+ }
+}
+
+func TestStrutil_ListContains(t *testing.T) {
+ haystack := []string{
+ "dev",
+ "ops",
+ "prod",
+ "root",
+ }
+ if StrListContains(haystack, "tubez") {
+ t.Fatalf("Bad")
+ }
+ if !StrListContains(haystack, "root") {
+ t.Fatalf("Bad")
+ }
+}
+
+func TestStrutil_ListSubset(t *testing.T) {
+ parent := []string{
+ "dev",
+ "ops",
+ "prod",
+ "root",
+ }
+ child := []string{
+ "prod",
+ "ops",
+ }
+ if !StrListSubset(parent, child) {
+ t.Fatalf("Bad")
+ }
+ if !StrListSubset(parent, parent) {
+ t.Fatalf("Bad")
+ }
+ if !StrListSubset(child, child) {
+ t.Fatalf("Bad")
+ }
+ if !StrListSubset(child, nil) {
+ t.Fatalf("Bad")
+ }
+ if StrListSubset(child, parent) {
+ t.Fatalf("Bad")
+ }
+ if StrListSubset(nil, child) {
+ t.Fatalf("Bad")
+ }
+}
+
+func TestStrutil_ParseKeyValues(t *testing.T) {
+ actual := make(map[string]string)
+ expected := map[string]string{
+ "key1": "value1",
+ "key2": "value2",
+ }
+ var input string
+ var err error
+
+ input = "key1=value1,key2=value2"
+ err = ParseKeyValues(input, actual, ",")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+ }
+ for k := range actual {
+ delete(actual, k)
+ }
+
+ input = "key1 = value1, key2 = value2"
+ err = ParseKeyValues(input, actual, ",")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+ }
+ for k := range actual {
+ delete(actual, k)
+ }
+
+ input = "key1 = value1, key2 = "
+ err = ParseKeyValues(input, actual, ",")
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+ for k := range actual {
+ delete(actual, k)
+ }
+
+ input = "key1 = value1, = value2 "
+ err = ParseKeyValues(input, actual, ",")
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+ for k := range actual {
+ delete(actual, k)
+ }
+}
+
+func TestStrutil_ParseArbitraryKeyValues(t *testing.T) {
+ actual := make(map[string]string)
+ expected := map[string]string{
+ "key1": "value1",
+ "key2": "value2",
+ }
+ var input string
+ var err error
+
+ // Test <key>=<value> pairs as a comma separated string
+ input = "key1=value1,key2=value2"
+ err = ParseArbitraryKeyValues(input, actual, ",")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+ }
+ for k := range actual {
+ delete(actual, k)
+ }
+
+ // Test <key>=<value> pairs as a base64 encoded comma separated string
+ input = base64.StdEncoding.EncodeToString([]byte(input))
+ err = ParseArbitraryKeyValues(input, actual, ",")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+ }
+ for k := range actual {
+ delete(actual, k)
+ }
+
+ // Test JSON-encoded <key>=<value> tuples
+ input = `{"key1":"value1", "key2":"value2"}`
+ err = ParseArbitraryKeyValues(input, actual, ",")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+ }
+ for k := range actual {
+ delete(actual, k)
+ }
+
+ // Test base64-encoded JSON string of <key>=<value> tuples
+ input = base64.StdEncoding.EncodeToString([]byte(input))
+ err = ParseArbitraryKeyValues(input, actual, ",")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+ }
+ for k := range actual {
+ delete(actual, k)
+ }
+}
+
+func TestStrutil_ParseArbitraryStringSlice(t *testing.T) {
+ input := `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';GRANT "foo-role" TO "{{name}}";ALTER ROLE "{{name}}" SET search_path = foo;GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`
+
+ jsonExpected := []string{
+ `DO $$
+BEGIN
+ IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
+ CREATE ROLE "foo-role";
+ CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
+ ALTER ROLE "foo-role" SET search_path = foo;
+ GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
+ GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
+ END IF;
+END
+$$`,
+ `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'`,
+ `GRANT "foo-role" TO "{{name}}"`,
+ `ALTER ROLE "{{name}}" SET search_path = foo`,
+ `GRANT CONNECT ON DATABASE "postgres" TO "{{name}}"`,
+ ``,
+ }
+
+ nonJSONExpected := jsonExpected[1:]
+
+ var actual []string
+ var inputB64 string
+ var err error
+
+ // Test non-JSON string
+ actual = ParseArbitraryStringSlice(input, ";")
+ if !reflect.DeepEqual(nonJSONExpected, actual) {
+ t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", nonJSONExpected, actual)
+ }
+
+ // Test base64-encoded non-JSON string
+ inputB64 = base64.StdEncoding.EncodeToString([]byte(input))
+ actual = ParseArbitraryStringSlice(inputB64, ";")
+ if !reflect.DeepEqual(nonJSONExpected, actual) {
+ t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", nonJSONExpected, actual)
+ }
+
+ // Test JSON encoded
+ inputJSON, err := json.Marshal(jsonExpected)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ actual = ParseArbitraryStringSlice(string(inputJSON), ";")
+ if !reflect.DeepEqual(jsonExpected, actual) {
+ t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", string(inputJSON), actual)
+ }
+
+ // Test base64-encoded JSON string slice
+ inputB64 = base64.StdEncoding.EncodeToString(inputJSON)
+ actual = ParseArbitraryStringSlice(inputB64, ";")
+ if !reflect.DeepEqual(jsonExpected, actual) {
+ t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", jsonExpected, actual)
+ }
+}
+
+func TestGlobbedStringsMatch(t *testing.T) {
+ type tCase struct {
+ item string
+ val string
+ expect bool
+ }
+
+ tCases := []tCase{
+ tCase{"", "", true},
+ tCase{"*", "*", true},
+ tCase{"**", "**", true},
+ tCase{"*t", "t", true},
+ tCase{"*t", "test", true},
+ tCase{"t*", "test", true},
+ tCase{"*test", "test", true},
+ tCase{"*test", "a test", true},
+ tCase{"test", "a test", false},
+ tCase{"*test", "tests", false},
+ tCase{"test*", "test", true},
+ tCase{"test*", "testsss", true},
+ tCase{"test**", "testsss", false},
+ tCase{"test**", "test*", true},
+ tCase{"**test", "*test", true},
+ tCase{"TEST", "test", false},
+ tCase{"test", "test", true},
+ }
+
+ for _, tc := range tCases {
+ actual := GlobbedStringsMatch(tc.item, tc.val)
+
+ if actual != tc.expect {
+ t.Fatalf("Bad testcase %#v, expected %b, got %b", tc, tc.expect, actual)
+ }
+ }
+}
+
+func TestTrimStrings(t *testing.T) {
+ input := []string{"abc", "123", "abcd ", "123 "}
+ expected := []string{"abc", "123", "abcd", "123"}
+ actual := TrimStrings(input)
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("Bad TrimStrings: expected:%#v, got:%#v", expected, actual)
+ }
+}
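+
+// Illustrative sketch, not part of upstream Vault: exercises
+// ParseDedupLowercaseAndSortStrings, which the tests above do not cover. It
+// should trim, lowercase, deduplicate, sort, and drop empty items. Test name
+// and inputs are assumed for illustration.
+func TestParseDedupLowercaseAndSortStrings(t *testing.T) {
+ expected := []string{"bar", "foo"}
+ actual := ParseDedupLowercaseAndSortStrings("Foo, bar ,foo,,BAR", ",")
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("bad: expected:%#v, got:%#v", expected, actual)
+ }
+
+ // Empty input yields an empty, non-nil slice.
+ if out := ParseDedupLowercaseAndSortStrings("", ","); len(out) != 0 {
+ t.Fatalf("bad: expected empty slice, got:%#v", out)
+ }
+}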
diff --git a/vendor/github.com/hashicorp/vault/helper/tlsutil/tls.go b/vendor/github.com/hashicorp/vault/helper/tlsutil/tls.go
new file mode 100644
index 0000000..5cbd060
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/tlsutil/tls.go
@@ -0,0 +1,49 @@
+package tlsutil
+
+import (
+ "crypto/tls"
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/strutil"
+)
+
+// TLSLookup maps the tls_min_version configuration to the internal value
+var TLSLookup = map[string]uint16{
+ "tls10": tls.VersionTLS10,
+ "tls11": tls.VersionTLS11,
+ "tls12": tls.VersionTLS12,
+}
+
+// ParseCiphers parses a comma-separated list of cipher suite names into a slice of recognized cipher suite IDs
+func ParseCiphers(cipherStr string) ([]uint16, error) {
+ suites := []uint16{}
+ ciphers := strutil.ParseStringSlice(cipherStr, ",")
+ cipherMap := map[string]uint16{
+ "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
+ "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ }
+ for _, cipher := range ciphers {
+ if v, ok := cipherMap[cipher]; ok {
+ suites = append(suites, v)
+ } else {
+ return suites, fmt.Errorf("unsupported cipher %q", cipher)
+ }
+ }
+
+ return suites, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go b/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go
new file mode 100644
index 0000000..a8e9e77
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go
@@ -0,0 +1,30 @@
+package tlsutil
+
+import (
+ "crypto/tls"
+ "reflect"
+ "testing"
+)
+
+func TestParseCiphers(t *testing.T) {
+ testOk := "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
+ v, err := ParseCiphers(testOk)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(v) != 12 {
+ t.Fatal("missed ciphers after parse")
+ }
+
+ testBad := "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,cipherX"
+ if _, err := ParseCiphers(testBad); err == nil {
+ t.Fatal("should fail on unsupported cipherX")
+ }
+
+ testOrder := "TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+ v, _ = ParseCiphers(testOrder)
+ expected := []uint16{tls.TLS_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_RSA_WITH_AES_128_GCM_SHA256}
+ if !reflect.DeepEqual(expected, v) {
+ t.Fatal("cipher order is not preserved")
+ }
+}
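+
+// Illustrative sketch, not part of upstream Vault: shows the intended use of
+// the TLSLookup map for turning a tls_min_version config string into a value
+// for tls.Config.MinVersion, with unknown versions detected via the map
+// lookup. Test name is assumed for illustration.
+func TestTLSLookup(t *testing.T) {
+ v, ok := TLSLookup["tls12"]
+ if !ok || v != tls.VersionTLS12 {
+ t.Fatalf("bad: %v %v", ok, v)
+ }
+
+ // Unsupported versions are simply absent, so callers can reject bad
+ // config values with a presence check.
+ if _, ok := TLSLookup["tls13"]; ok {
+ t.Fatal("tls13 is not supported by this vendored version")
+ }
+}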
diff --git a/vendor/github.com/hashicorp/vault/helper/xor/xor.go b/vendor/github.com/hashicorp/vault/helper/xor/xor.go
new file mode 100644
index 0000000..4c5f88c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/xor/xor.go
@@ -0,0 +1,46 @@
+package xor
+
+import (
+ "encoding/base64"
+ "fmt"
+)
+
+// XORBytes takes two byte slices and XORs them together, returning the final
+// byte slice. It is an error to pass in two byte slices that do not have the
+// same length.
+func XORBytes(a, b []byte) ([]byte, error) {
+ if len(a) != len(b) {
+ return nil, fmt.Errorf("length of byte slices is not equivalent: %d != %d", len(a), len(b))
+ }
+
+ buf := make([]byte, len(a))
+
+ for i := range a {
+ buf[i] = a[i] ^ b[i]
+ }
+
+ return buf, nil
+}
+
+// XORBase64 takes two base64-encoded strings and XORs the decoded byte slices
+// together, returning the final byte slice. It is an error to pass in two
+// strings that do not have the same length to their base64-decoded byte slice.
+func XORBase64(a, b string) ([]byte, error) {
+ aBytes, err := base64.StdEncoding.DecodeString(a)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding first base64 value: %v", err)
+ }
+ if len(aBytes) == 0 {
+ return nil, fmt.Errorf("decoded first base64 value is nil or empty")
+ }
+
+ bBytes, err := base64.StdEncoding.DecodeString(b)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding second base64 value: %v", err)
+ }
+ if len(bBytes) == 0 {
+ return nil, fmt.Errorf("decoded second base64 value is nil or empty")
+ }
+
+ return XORBytes(aBytes, bBytes)
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/xor/xor_test.go b/vendor/github.com/hashicorp/vault/helper/xor/xor_test.go
new file mode 100644
index 0000000..f50f525
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/xor/xor_test.go
@@ -0,0 +1,22 @@
+package xor
+
+import (
+ "encoding/base64"
+ "testing"
+)
+
+const (
+ tokenB64 = "ZGE0N2JiODkzYjhkMDYxYw=="
+ xorB64 = "iGiQYG9L0nIp+jRL5+Zk2w=="
+ expectedB64 = "7AmkVw0p6ksamAwv19BVuA=="
+)
+
+func TestBase64XOR(t *testing.T) {
+ ret, err := XORBase64(tokenB64, xorB64)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res := base64.StdEncoding.EncodeToString(ret); res != expectedB64 {
+ t.Fatalf("bad: %s", res)
+ }
+}
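+
+// Illustrative sketch, not part of upstream Vault: XOR is its own inverse, so
+// applying XORBytes twice with the same mask recovers the original bytes,
+// which is what makes this kind of masking reversible. Test name and inputs
+// are assumed for illustration.
+func TestXORBytesRoundTrip(t *testing.T) {
+ a := []byte("da47bb893b8d061c")
+ mask := []byte("0123456789abcdef")
+
+ masked, err := XORBytes(a, mask)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ unmasked, err := XORBytes(masked, mask)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(unmasked) != string(a) {
+ t.Fatalf("bad: %s", unmasked)
+ }
+
+ // Mismatched lengths are rejected.
+ if _, err := XORBytes(a, mask[:8]); err == nil {
+ t.Fatal("expected length mismatch error")
+ }
+}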
diff --git a/vendor/github.com/hashicorp/vault/http/auth_token_test.go b/vendor/github.com/hashicorp/vault/http/auth_token_test.go
new file mode 100644
index 0000000..9822f7d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/auth_token_test.go
@@ -0,0 +1,208 @@
+package http
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestAuthTokenCreate(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ config := api.DefaultConfig()
+ config.Address = addr
+
+ client, err := api.NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client.SetToken(token)
+
+ secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+ Lease: "1h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret.Auth.LeaseDuration != 3600 {
+ t.Errorf("expected 1h, got %q", secret.Auth.LeaseDuration)
+ }
+
+ renewCreateRequest := &api.TokenCreateRequest{
+ TTL: "1h",
+ Renewable: new(bool),
+ }
+
+ secret, err = client.Auth().Token().Create(renewCreateRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret.Auth.LeaseDuration != 3600 {
+ t.Errorf("expected 1h, got %q", secret.Auth.LeaseDuration)
+ }
+ if secret.Auth.Renewable {
+ t.Errorf("expected non-renewable token")
+ }
+
+ *renewCreateRequest.Renewable = true
+ secret, err = client.Auth().Token().Create(renewCreateRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret.Auth.LeaseDuration != 3600 {
+ t.Errorf("expected 1h, got %q", secret.Auth.LeaseDuration)
+ }
+ if !secret.Auth.Renewable {
+ t.Errorf("expected renewable token")
+ }
+
+ explicitMaxCreateRequest := &api.TokenCreateRequest{
+ TTL: "1h",
+ ExplicitMaxTTL: "1800s",
+ }
+
+ secret, err = client.Auth().Token().Create(explicitMaxCreateRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret.Auth.LeaseDuration != 1800 {
+ t.Errorf("expected 1800 seconds, got %q", secret.Auth.LeaseDuration)
+ }
+
+ explicitMaxCreateRequest.ExplicitMaxTTL = "2h"
+ secret, err = client.Auth().Token().Create(explicitMaxCreateRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret.Auth.LeaseDuration != 3600 {
+ t.Errorf("expected 3600 seconds, got %q", secret.Auth.LeaseDuration)
+ }
+}
+
+func TestAuthTokenLookup(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ config := api.DefaultConfig()
+ config.Address = addr
+
+ client, err := api.NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client.SetToken(token)
+
+ // Create a new token ...
+ secret2, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+ Lease: "1h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // lookup details of this token
+ secret, err := client.Auth().Token().Lookup(secret2.Auth.ClientToken)
+ if err != nil {
+ t.Fatalf("unable to lookup details of token, err = %v", err)
+ }
+
+ if secret.Data["id"] != secret2.Auth.ClientToken {
+ t.Errorf("Did not get back details about our provided token, id returned=%s", secret.Data["id"])
+ }
+}
+
+func TestAuthTokenLookupSelf(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ config := api.DefaultConfig()
+ config.Address = addr
+
+ client, err := api.NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client.SetToken(token)
+
+ // you should be able to lookup your own token
+ secret, err := client.Auth().Token().LookupSelf()
+ if err != nil {
+ t.Fatalf("should be allowed to lookup self, err = %v", err)
+ }
+
+ if secret.Data["id"] != token {
+ t.Errorf("Did not get back details about our own (self) token, id returned=%s", secret.Data["id"])
+ }
+ if secret.Data["display_name"] != "root" {
+ t.Errorf("Did not get back details about our own (self) token, display_name returned=%s", secret.Data["display_name"])
+ }
+}
+
+func TestAuthTokenRenew(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ config := api.DefaultConfig()
+ config.Address = addr
+
+ client, err := api.NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client.SetToken(token)
+
+ // The default root token is not renewable, so this should not work
+ _, err = client.Auth().Token().Renew(token, 0)
+ if err == nil {
+ t.Fatal("should not be allowed to renew root token")
+ }
+ if !strings.Contains(err.Error(), "lease is not renewable") {
+ t.Fatalf("wrong error; got %v", err)
+ }
+
+ // Create a new token that should be renewable
+ secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+ Lease: "1h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ client.SetToken(secret.Auth.ClientToken)
+
+ // Now attempt a renew with the new token
+ secret, err = client.Auth().Token().Renew(secret.Auth.ClientToken, 3600)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if secret.Auth.LeaseDuration != 3600 {
+ t.Errorf("expected 1h, got %v", secret.Auth.LeaseDuration)
+ }
+
+ if secret.Auth.Renewable != true {
+ t.Error("expected lease to be renewable")
+ }
+
+ // Do the same thing with the self variant
+ secret, err = client.Auth().Token().RenewSelf(3600)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if secret.Auth.LeaseDuration != 3600 {
+ t.Errorf("expected 1h, got %v", secret.Auth.LeaseDuration)
+ }
+
+ if secret.Auth.Renewable != true {
+ t.Error("expected lease to be renewable")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/forwarding_test.go b/vendor/github.com/hashicorp/vault/http/forwarding_test.go
new file mode 100644
index 0000000..fdc3b76
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/forwarding_test.go
@@ -0,0 +1,597 @@
+package http
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/rand"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "golang.org/x/net/http2"
+
+ cleanhttp "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/api"
+ credCert "github.com/hashicorp/vault/builtin/credential/cert"
+ "github.com/hashicorp/vault/builtin/logical/transit"
+ "github.com/hashicorp/vault/helper/keysutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestHTTP_Fallback_Bad_Address(t *testing.T) {
+ handler1 := http.NewServeMux()
+ handler2 := http.NewServeMux()
+ handler3 := http.NewServeMux()
+
+ coreConfig := &vault.CoreConfig{
+ LogicalBackends: map[string]logical.Factory{
+ "transit": transit.Factory,
+ },
+ ClusterAddr: "https://127.3.4.1:8382",
+ }
+
+ // Chicken-and-egg: Handler needs a core. So we create handlers first, then
+ // add routes chained to a Handler-created handler.
+ cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true)
+ for _, core := range cores {
+ defer core.CloseListeners()
+ }
+ handler1.Handle("/", Handler(cores[0].Core))
+ handler2.Handle("/", Handler(cores[1].Core))
+ handler3.Handle("/", Handler(cores[2].Core))
+
+ // Make it easy to get access to the active node.
+ core := cores[0].Core
+ vault.TestWaitActive(t, core)
+
+ root := cores[0].Root
+
+ addrs := []string{
+ fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port),
+ fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port),
+ }
+
+ for _, addr := range addrs {
+ config := api.DefaultConfig()
+ config.Address = addr
+ config.HttpClient = cleanhttp.DefaultClient()
+ config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig
+ client, err := api.NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client.SetToken(root)
+
+ secret, err := client.Auth().Token().LookupSelf()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret == nil {
+ t.Fatal("secret is nil")
+ }
+ if secret.Data["id"].(string) != root {
+ t.Fatal("token mismatch")
+ }
+ }
+}
+
+func TestHTTP_Fallback_Disabled(t *testing.T) {
+ handler1 := http.NewServeMux()
+ handler2 := http.NewServeMux()
+ handler3 := http.NewServeMux()
+
+ coreConfig := &vault.CoreConfig{
+ LogicalBackends: map[string]logical.Factory{
+ "transit": transit.Factory,
+ },
+ ClusterAddr: "empty",
+ }
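+ // With no usable ClusterAddr, request forwarding is unavailable and
+ // standbys must serve clients via redirects instead.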
+
+ // Chicken-and-egg: the Handler needs a core, but the test cluster needs
+ // handlers up front. So we create bare muxers first, then wire each one to
+ // a Handler-created handler once the cores exist.
+ cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true)
+ for _, core := range cores {
+ defer core.CloseListeners()
+ }
+ handler1.Handle("/", Handler(cores[0].Core))
+ handler2.Handle("/", Handler(cores[1].Core))
+ handler3.Handle("/", Handler(cores[2].Core))
+
+ // Make it easy to get access to the active core
+ core := cores[0].Core
+ vault.TestWaitActive(t, core)
+
+ root := cores[0].Root
+
+ addrs := []string{
+ fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port),
+ fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port),
+ }
+
+ for _, addr := range addrs {
+ config := api.DefaultConfig()
+ config.Address = addr
+ config.HttpClient = cleanhttp.DefaultClient()
+ config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig
+ client, err := api.NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client.SetToken(root)
+
+ secret, err := client.Auth().Token().LookupSelf()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret == nil {
+ t.Fatal("secret is nil")
+ }
+ if secret.Data["id"].(string) != root {
+ t.Fatal("token mismatch")
+ }
+ }
+}
+
+// This function recreates the fuzz testing from transit to pipe a large
+// number of requests from the standbys to the active node.
+func TestHTTP_Forwarding_Stress(t *testing.T) {
+ testHTTP_Forwarding_Stress_Common(t, false, false, 50)
+ testHTTP_Forwarding_Stress_Common(t, false, true, 50)
+ testHTTP_Forwarding_Stress_Common(t, true, false, 50)
+ testHTTP_Forwarding_Stress_Common(t, true, true, 50)
+ os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "")
+}
+
+func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uint64) {
+ testPlaintext := "the quick brown fox"
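+ // testPlaintextB64 is the base64 encoding of testPlaintext.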
+ testPlaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+
+ if rpc {
+ os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "1")
+ } else {
+ os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "")
+ }
+
+ handler1 := http.NewServeMux()
+ handler2 := http.NewServeMux()
+ handler3 := http.NewServeMux()
+
+ coreConfig := &vault.CoreConfig{
+ LogicalBackends: map[string]logical.Factory{
+ "transit": transit.Factory,
+ },
+ }
+
+ // Chicken-and-egg: the Handler needs a core, but the test cluster needs
+ // handlers up front. So we create bare muxers first, then wire each one to
+ // a Handler-created handler once the cores exist.
+ cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true)
+ for _, core := range cores {
+ defer core.CloseListeners()
+ }
+ handler1.Handle("/", Handler(cores[0].Core))
+ handler2.Handle("/", Handler(cores[1].Core))
+ handler3.Handle("/", Handler(cores[2].Core))
+
+ // Make it easy to get access to the active core
+ core := cores[0].Core
+ vault.TestWaitActive(t, core)
+
+ root := cores[0].Root
+
+ wg := sync.WaitGroup{}
+
+ funcs := []string{"encrypt", "decrypt", "rotate", "change_min_version"}
+ keys := []string{"test1", "test2", "test3"}
+
+ hosts := []string{
+ fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[1].Listeners[0].Address.Port),
+ fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[2].Listeners[0].Address.Port),
+ }
+
+ transport := &http.Transport{
+ TLSClientConfig: cores[0].TLSConfig,
+ }
+ if err := http2.ConfigureTransport(transport); err != nil {
+ t.Fatal(err)
+ }
+
+ client := &http.Client{
+ Transport: transport,
+ CheckRedirect: func(*http.Request, []*http.Request) error {
+ return fmt.Errorf("redirects not allowed in this test")
+ },
+ }
+
+ //core.Logger().Printf("[TRACE] mounting transit")
+ req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/mounts/transit", cores[0].Listeners[0].Address.Port),
+ bytes.NewBuffer([]byte("{\"type\": \"transit\"}")))
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.Header.Set(AuthHeaderName, root)
+ _, err = client.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ //core.Logger().Printf("[TRACE] done mounting transit")
+
+ var totalOps uint64
+ var successfulOps uint64
+ var key1ver int64 = 1
+ var key2ver int64 = 1
+ var key3ver int64 = 1
+ var numWorkers uint64 = 50
+ var numWorkersStarted uint64
+ var waitLock sync.Mutex
+ waitCond := sync.NewCond(&waitLock)
+
+ // This is the goroutine loop
+ doFuzzy := func(id int, parallel bool) {
+ var myTotalOps uint64
+ var mySuccessfulOps uint64
+ var keyVer int64 = 1
+ // Check for panics, otherwise notify we're done
+ defer func() {
+ if err := recover(); err != nil {
+ core.Logger().Error("got a panic", "error", err)
+ t.Fail()
+ }
+ atomic.AddUint64(&totalOps, myTotalOps)
+ atomic.AddUint64(&successfulOps, mySuccessfulOps)
+ wg.Done()
+ }()
+
+ // Holds the latest encrypted value for each key
+ latestEncryptedText := map[string]string{}
+
+ client := &http.Client{
+ Transport: transport,
+ }
+
+ var chosenFunc, chosenKey, chosenHost string
+
+ myRand := rand.New(rand.NewSource(int64(id) * 400))
+
+ doReq := func(method, url string, body io.Reader) (*http.Response, error) {
+ req, err := http.NewRequest(method, url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set(AuthHeaderName, root)
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+ }
+
+ doResp := func(resp *http.Response) (*api.Secret, error) {
+ if resp == nil {
+ return nil, fmt.Errorf("nil response")
+ }
+ defer resp.Body.Close()
+
+ // Make sure we weren't redirected
+ if resp.StatusCode > 300 && resp.StatusCode < 400 {
+ return nil, fmt.Errorf("got status code %d, resp was %#v", resp.StatusCode, *resp)
+ }
+
+ result := &api.Response{Response: resp}
+ err := result.Error()
+ if err != nil {
+ return nil, err
+ }
+
+ secret, err := api.ParseSecret(result.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ return secret, nil
+ }
+
+ for _, chosenHost := range hosts {
+ for _, chosenKey := range keys {
+ // Try to write the key to make sure it exists
+ _, err := doReq("POST", chosenHost+"keys/"+fmt.Sprintf("%s-%t", chosenKey, parallel), bytes.NewBuffer([]byte("{}")))
+ if err != nil {
+ panic(err)
+ }
+ }
+ }
+
+ if !parallel {
+ chosenHost = hosts[id%len(hosts)]
+ chosenKey = fmt.Sprintf("key-%t-%d", parallel, id)
+
+ _, err := doReq("POST", chosenHost+"keys/"+chosenKey, bytes.NewBuffer([]byte("{}")))
+ if err != nil {
+ panic(err)
+ }
+ }
+
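+ // Start gate: every worker announces readiness and then blocks until all
+ // numWorkers goroutines have checked in, so the whole pool enters the
+ // stress loop together.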
+ atomic.AddUint64(&numWorkersStarted, 1)
+
+ waitCond.L.Lock()
+ for atomic.LoadUint64(&numWorkersStarted) != numWorkers {
+ waitCond.Wait()
+ }
+ waitCond.L.Unlock()
+ waitCond.Broadcast()
+
+ core.Logger().Trace("Starting goroutine", "id", id)
+
+ startTime := time.Now()
+ for {
+ // Stop after 10 seconds
+ if time.Since(startTime) > 10*time.Second {
+ return
+ }
+
+ myTotalOps++
+
+ // Pick a function and a key
+ chosenFunc = funcs[myRand.Int()%len(funcs)]
+ if parallel {
+ chosenKey = fmt.Sprintf("%s-%t", keys[myRand.Int()%len(keys)], parallel)
+ chosenHost = hosts[myRand.Int()%len(hosts)]
+ }
+
+ switch chosenFunc {
+ // Encrypt our plaintext and store the result
+ case "encrypt":
+ //core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id)
+ resp, err := doReq("POST", chosenHost+"encrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"plaintext\": \"%s\"}", testPlaintextB64))))
+ if err != nil {
+ panic(err)
+ }
+
+ secret, err := doResp(resp)
+ if err != nil {
+ panic(err)
+ }
+
+ latest := secret.Data["ciphertext"].(string)
+ if latest == "" {
+ panic(fmt.Errorf("bad ciphertext"))
+ }
+ latestEncryptedText[chosenKey] = latest
+
+ mySuccessfulOps++
+
+ // Decrypt the ciphertext and compare the result
+ case "decrypt":
+ ct := latestEncryptedText[chosenKey]
+ if ct == "" {
+ mySuccessfulOps++
+ continue
+ }
+
+ //core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id)
+ resp, err := doReq("POST", chosenHost+"decrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"ciphertext\": \"%s\"}", ct))))
+ if err != nil {
+ panic(err)
+ }
+
+ secret, err := doResp(resp)
+ if err != nil {
+ // This could well happen since the min version is jumping around
+ if strings.Contains(err.Error(), keysutil.ErrTooOld) {
+ mySuccessfulOps++
+ continue
+ }
+ panic(err)
+ }
+
+ ptb64 := secret.Data["plaintext"].(string)
+ pt, err := base64.StdEncoding.DecodeString(ptb64)
+ if err != nil {
+ panic(fmt.Errorf("got an error decoding base64 plaintext: %v", err))
+ }
+ if string(pt) != testPlaintext {
+ panic(fmt.Errorf("got bad plaintext back: %s", pt))
+ }
+
+ mySuccessfulOps++
+
+ // Rotate to a new key version
+ case "rotate":
+ //core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id)
+ _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/rotate", bytes.NewBuffer([]byte("{}")))
+ if err != nil {
+ panic(err)
+ }
+ if parallel {
+ switch chosenKey {
+ case "test1":
+ atomic.AddInt64(&key1ver, 1)
+ case "test2":
+ atomic.AddInt64(&key2ver, 1)
+ case "test3":
+ atomic.AddInt64(&key3ver, 1)
+ }
+ } else {
+ keyVer++
+ }
+
+ mySuccessfulOps++
+
+ // Change the min version, which also tests the archive functionality
+ case "change_min_version":
+ var latestVersion int64 = keyVer
+ if parallel {
+ switch chosenKey {
+ case "test1":
+ latestVersion = atomic.LoadInt64(&key1ver)
+ case "test2":
+ latestVersion = atomic.LoadInt64(&key2ver)
+ case "test3":
+ latestVersion = atomic.LoadInt64(&key3ver)
+ }
+ }
+
+ setVersion := (myRand.Int63() % latestVersion) + 1
+
+ //core.Logger().Printf("[TRACE] %s, %s, %d, new min version %d", chosenFunc, chosenKey, id, setVersion)
+
+ _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/config", bytes.NewBuffer([]byte(fmt.Sprintf("{\"min_decryption_version\": %d}", setVersion))))
+ if err != nil {
+ panic(err)
+ }
+
+ mySuccessfulOps++
+ }
+ }
+ }
+
+ atomic.StoreUint64(&numWorkers, num)
+
+ // Spawn some of these workers for 10 seconds
+ for i := 0; i < int(atomic.LoadUint64(&numWorkers)); i++ {
+ wg.Add(1)
+ //core.Logger().Printf("[TRACE] spawning %d", i)
+ go doFuzzy(i+1, parallel)
+ }
+
+ // Wait for them all to finish
+ wg.Wait()
+
+ if totalOps == 0 || totalOps != successfulOps {
+ t.Fatalf("total/successful ops zero or mismatch: %d/%d; rpc: %t, parallel: %t, num %d", totalOps, successfulOps, rpc, parallel, num)
+ }
+ t.Logf("total operations tried: %d, total successful: %d; rpc: %t, parallel: %t, num %d", totalOps, successfulOps, rpc, parallel, num)
+}
+
+// This tests TLS connection state forwarding by ensuring that we can use a
+// client TLS to authenticate against the cert backend
+func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
+ handler1 := http.NewServeMux()
+ handler2 := http.NewServeMux()
+ handler3 := http.NewServeMux()
+
+ coreConfig := &vault.CoreConfig{
+ CredentialBackends: map[string]logical.Factory{
+ "cert": credCert.Factory,
+ },
+ }
+
+ // Chicken-and-egg: the Handler needs a core, but the test cluster needs
+ // handlers up front. So we create bare muxers first, then wire each one to
+ // a Handler-created handler once the cores exist.
+ cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true)
+ for _, core := range cores {
+ defer core.CloseListeners()
+ }
+ handler1.Handle("/", Handler(cores[0].Core))
+ handler2.Handle("/", Handler(cores[1].Core))
+ handler3.Handle("/", Handler(cores[2].Core))
+
+ // Make it easy to get access to the active core
+ core := cores[0].Core
+ vault.TestWaitActive(t, core)
+
+ root := cores[0].Root
+
+ transport := cleanhttp.DefaultTransport()
+ transport.TLSClientConfig = cores[0].TLSConfig
+ if err := http2.ConfigureTransport(transport); err != nil {
+ t.Fatal(err)
+ }
+
+ client := &http.Client{
+ Transport: transport,
+ }
+
+ req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/auth/cert", cores[0].Listeners[0].Address.Port),
+ bytes.NewBuffer([]byte("{\"type\": \"cert\"}")))
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.Header.Set(AuthHeaderName, root)
+ _, err = client.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ type certConfig struct {
+ Certificate string `json:"certificate"`
+ Policies string `json:"policies"`
+ }
+ encodedCertConfig, err := json.Marshal(&certConfig{
+ Certificate: vault.TestClusterCACert,
+ Policies: "default",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ req, err = http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/auth/cert/certs/test", cores[0].Listeners[0].Address.Port),
+ bytes.NewBuffer(encodedCertConfig))
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.Header.Set(AuthHeaderName, root)
+ _, err = client.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ addrs := []string{
+ fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port),
+ fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port),
+ }
+
+ // Ensure we can't possibly use lingering connections even though it should be to a different address
+
+ transport = cleanhttp.DefaultTransport()
+ transport.TLSClientConfig = cores[0].TLSConfig
+
+ client = &http.Client{
+ Transport: transport,
+ CheckRedirect: func(*http.Request, []*http.Request) error {
+ return fmt.Errorf("redirects not allowed in this test")
+ },
+ }
+
+ for _, addr := range addrs {
+ client := cores[0].Client
+ client.SetAddress(addr)
+
+ secret, err := client.Logical().Write("auth/cert/login", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret == nil {
+ t.Fatal("secret is nil")
+ }
+ if secret.Auth == nil {
+ t.Fatal("auth is nil")
+ }
+ if secret.Auth.Policies == nil || len(secret.Auth.Policies) == 0 || secret.Auth.Policies[0] != "default" {
+ t.Fatalf("bad policies: %#v", secret.Auth.Policies)
+ }
+ if secret.Auth.ClientToken == "" {
+ t.Fatalf("bad client token: %#v", *secret.Auth)
+ }
+ client.SetToken(secret.Auth.ClientToken)
+ secret, err = client.Auth().Token().LookupSelf()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret == nil {
+ t.Fatal("secret is nil")
+ }
+ if secret.Data == nil || len(secret.Data) == 0 {
+ t.Fatal("secret data was empty")
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/handler.go b/vendor/github.com/hashicorp/vault/http/handler.go
new file mode 100644
index 0000000..fb9b7a8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/handler.go
@@ -0,0 +1,353 @@
+package http
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/vault"
+)
+
+const (
+ // AuthHeaderName is the name of the header containing the token.
+ AuthHeaderName = "X-Vault-Token"
+
+ // WrapTTLHeaderName is the name of the header containing a directive to
+ // wrap the response
+ WrapTTLHeaderName = "X-Vault-Wrap-TTL"
+
+ // WrapFormatHeaderName is the name of the header containing the format to
+ // wrap in; has no effect if the wrap TTL is not set
+ WrapFormatHeaderName = "X-Vault-Wrap-Format"
+
+ // NoRequestForwardingHeaderName is the name of the header telling Vault
+ // not to use request forwarding
+ NoRequestForwardingHeaderName = "X-Vault-No-Request-Forwarding"
+
+ // MaxRequestSize is the maximum accepted request size. This is to prevent
+ // a denial of service attack where no Content-Length is provided and the server
+ // is fed ever more data until it exhausts memory.
+ MaxRequestSize = 32 * 1024 * 1024
+)
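+
+// As an illustration, a client opts into response wrapping by sending
+// "X-Vault-Wrap-TTL: 60s" alongside its "X-Vault-Token" header;
+// TestSysMounts_headerAuth_Wrapped in handler_test.go exercises this.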
+
+// Handler returns an http.Handler for the API. This can be used on
+// its own to mount the Vault API within another web server.
+func Handler(core *vault.Core) http.Handler {
+ // Create the muxer to handle the actual endpoints
+ mux := http.NewServeMux()
+ mux.Handle("/v1/sys/init", handleSysInit(core))
+ mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core))
+ mux.Handle("/v1/sys/seal", handleSysSeal(core))
+ mux.Handle("/v1/sys/step-down", handleSysStepDown(core))
+ mux.Handle("/v1/sys/unseal", handleSysUnseal(core))
+ mux.Handle("/v1/sys/renew", handleRequestForwarding(core, handleLogical(core, false, nil)))
+ mux.Handle("/v1/sys/renew/", handleRequestForwarding(core, handleLogical(core, false, nil)))
+ mux.Handle("/v1/sys/leader", handleSysLeader(core))
+ mux.Handle("/v1/sys/health", handleSysHealth(core))
+ mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core, handleSysGenerateRootAttempt(core)))
+ mux.Handle("/v1/sys/generate-root/update", handleRequestForwarding(core, handleSysGenerateRootUpdate(core)))
+ mux.Handle("/v1/sys/rekey/init", handleRequestForwarding(core, handleSysRekeyInit(core, false)))
+ mux.Handle("/v1/sys/rekey/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, false)))
+ mux.Handle("/v1/sys/rekey-recovery-key/init", handleRequestForwarding(core, handleSysRekeyInit(core, true)))
+ mux.Handle("/v1/sys/rekey-recovery-key/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, true)))
+ mux.Handle("/v1/sys/wrapping/lookup", handleRequestForwarding(core, handleLogical(core, false, wrappingVerificationFunc)))
+ mux.Handle("/v1/sys/wrapping/rewrap", handleRequestForwarding(core, handleLogical(core, false, wrappingVerificationFunc)))
+ mux.Handle("/v1/sys/wrapping/unwrap", handleRequestForwarding(core, handleLogical(core, false, wrappingVerificationFunc)))
+ mux.Handle("/v1/sys/capabilities-self", handleRequestForwarding(core, handleLogical(core, true, nil)))
+ mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core, true, nil)))
+ mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core, false, nil)))
+
+ // Wrap the handler in another handler to trigger all help paths.
+ helpWrappedHandler := wrapHelpHandler(mux, core)
+
+ // Wrap the help wrapped handler with another layer with a generic
+ // handler
+ genericWrappedHandler := wrapGenericHandler(helpWrappedHandler)
+
+ return genericWrappedHandler
+}
+
+// wrapGenericHandler wraps the handler with an extra layer that performs
+// tasks common to all requests and responses.
+func wrapGenericHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Set the Cache-Control header for all the responses returned
+ // by Vault
+ w.Header().Set("Cache-Control", "no-store")
+ h.ServeHTTP(w, r)
+ return
+ })
+}
+
+// A lookup on a token that is about to expire returns nil, which means that
+// by the time we could validate a wrapping token, a lookup would return nil
+// because the token is revoked as part of the call. So we have to do the
+// validation here.
+func wrappingVerificationFunc(core *vault.Core, req *logical.Request) error {
+ if req == nil {
+ return fmt.Errorf("invalid request")
+ }
+
+ valid, err := core.ValidateWrappingToken(req)
+ if err != nil {
+ return fmt.Errorf("error validating wrapping token: %v", err)
+ }
+ if !valid {
+ return fmt.Errorf("wrapping token is not valid or does not exist")
+ }
+
+ return nil
+}
+
+// stripPrefix is a helper to strip a prefix from the path. It returns false
+// as the second return value if the prefix is not present or nothing remains
+// after it.
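+// For example, stripPrefix("/v1/", "/v1/secret/foo") yields
+// ("secret/foo", true), while stripPrefix("/v1/", "/v1/") yields ("", false).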
+func stripPrefix(prefix, path string) (string, bool) {
+ if !strings.HasPrefix(path, prefix) {
+ return "", false
+ }
+
+ path = path[len(prefix):]
+ if path == "" {
+ return "", false
+ }
+
+ return path, true
+}
+
+func parseRequest(r *http.Request, w http.ResponseWriter, out interface{}) error {
+ // Limit the maximum number of bytes to MaxRequestSize to protect
+ // against an indefinite amount of data being read.
+ limit := http.MaxBytesReader(w, r.Body, MaxRequestSize)
+ err := jsonutil.DecodeJSONFromReader(limit, out)
+ if err != nil && err != io.EOF {
+ return errwrap.Wrapf("failed to parse JSON input: {{err}}", err)
+ }
+ return err
+}
+
+// handleRequestForwarding determines whether to forward a request or not,
+// falling back on the older behavior of redirecting the client
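+// The ladder is: serve locally when forwarding is suppressed by header, when
+// HA is not enabled, or when this node is the leader; otherwise forward via
+// core.ForwardRequest, falling back to the redirect-based handler when
+// forwarding is unavailable.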
+func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get(vault.IntNoForwardingHeaderName) != "" {
+ handler.ServeHTTP(w, r)
+ return
+ }
+
+ if r.Header.Get(NoRequestForwardingHeaderName) != "" {
+ // Forwarding explicitly disabled, fall back to previous behavior
+ core.Logger().Trace("http/handleRequestForwarding: forwarding disabled by client request")
+ handler.ServeHTTP(w, r)
+ return
+ }
+
+ // Note: in an HA setup, this call will also ensure that connections to
+ // the leader are set up, as that happens once the advertised cluster
+ // values are read during this function
+ isLeader, leaderAddr, err := core.Leader()
+ if err != nil {
+ if err == vault.ErrHANotEnabled {
+ // Standalone node, serve request normally
+ handler.ServeHTTP(w, r)
+ return
+ }
+ // Some internal error occurred
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+ if isLeader {
+ // No forwarding needed, we're leader
+ handler.ServeHTTP(w, r)
+ return
+ }
+ if leaderAddr == "" {
+ respondError(w, http.StatusInternalServerError, fmt.Errorf("node not active but active node not found"))
+ return
+ }
+
+ // Attempt forwarding the request. If we cannot forward -- perhaps it's
+ // been disabled on the active node -- this will return with an
+ // ErrCannotForward and we simply fall back
+ statusCode, header, retBytes, err := core.ForwardRequest(r)
+ if err != nil {
+ if err == vault.ErrCannotForward {
+ core.Logger().Trace("http/handleRequestForwarding: cannot forward (possibly disabled on active node), falling back")
+ } else {
+ core.Logger().Error("http/handleRequestForwarding: error forwarding request", "error", err)
+ }
+
+ // Fall back to redirection
+ handler.ServeHTTP(w, r)
+ return
+ }
+
+ if header != nil {
+ for k, v := range header {
+ for _, j := range v {
+ w.Header().Add(k, j)
+ }
+ }
+ }
+
+ w.WriteHeader(statusCode)
+ w.Write(retBytes)
+ return
+ })
+}
+
+// request is a helper to perform a request and properly exit in the
+// case of an error.
+func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *logical.Request) (*logical.Response, bool) {
+ resp, err := core.HandleRequest(r)
+ if errwrap.Contains(err, consts.ErrStandby.Error()) {
+ respondStandby(core, w, rawReq.URL)
+ return resp, false
+ }
+ if respondErrorCommon(w, r, resp, err) {
+ return resp, false
+ }
+
+ return resp, true
+}
+
+// respondStandby is used to trigger a redirect in the case that this Vault is currently a hot standby
+func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) {
+ // Request the leader address
+ _, redirectAddr, err := core.Leader()
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+
+ // If there is no leader, generate a 503 error
+ if redirectAddr == "" {
+ err = fmt.Errorf("no active Vault instance found")
+ respondError(w, http.StatusServiceUnavailable, err)
+ return
+ }
+
+ // Parse the redirect location
+ redirectURL, err := url.Parse(redirectAddr)
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+
+ // Generate a redirect URL
+ finalURL := url.URL{
+ Scheme: redirectURL.Scheme,
+ Host: redirectURL.Host,
+ Path: reqURL.Path,
+ RawQuery: reqURL.RawQuery,
+ }
+
+ // Ensure there is a scheme, default to https
+ if finalURL.Scheme == "" {
+ finalURL.Scheme = "https"
+ }
+
+ // If we have an address, redirect! We use a 307 code
+ // because we don't actually know if it's permanent and
+ // the request method should be preserved.
+ w.Header().Set("Location", finalURL.String())
+ w.WriteHeader(307)
+}
+
+// requestAuth adds the token to the logical.Request if it exists.
+func requestAuth(core *vault.Core, r *http.Request, req *logical.Request) *logical.Request {
+ // Attach the header value if we have it
+ if v := r.Header.Get(AuthHeaderName); v != "" {
+ req.ClientToken = v
+
+ // Also attach the accessor if we have it. This doesn't fail if it
+ // doesn't exist because the request may be to an unauthenticated
+ // endpoint/login endpoint where a bad current token doesn't matter, or
+ // a token from a Vault version pre-accessors.
+ te, err := core.LookupToken(v)
+ if err == nil && te != nil {
+ req.ClientTokenAccessor = te.Accessor
+ req.ClientTokenRemainingUses = te.NumUses
+ }
+ }
+
+ return req
+}
+
+// requestWrapInfo adds the WrapInfo value to the logical.Request if wrap info exists
+func requestWrapInfo(r *http.Request, req *logical.Request) (*logical.Request, error) {
+ // First try for the header value
+ wrapTTL := r.Header.Get(WrapTTLHeaderName)
+ if wrapTTL == "" {
+ return req, nil
+ }
+
+ // Parse the TTL, which may be a bare number of seconds or a duration
+ // string with an allowed suffix (e.g. "60", "60s", "15m")
+ dur, err := parseutil.ParseDurationSecond(wrapTTL)
+ if err != nil {
+ return req, err
+ }
+ if int64(dur) < 0 {
+ return req, fmt.Errorf("requested wrap ttl cannot be negative")
+ }
+
+ req.WrapInfo = &logical.RequestWrapInfo{
+ TTL: dur,
+ }
+
+ wrapFormat := r.Header.Get(WrapFormatHeaderName)
+ switch wrapFormat {
+ case "jwt":
+ req.WrapInfo.Format = "jwt"
+ }
+
+ return req, nil
+}
+
+func respondError(w http.ResponseWriter, status int, err error) {
+ logical.AdjustErrorStatusCode(&status, err)
+
+ w.Header().Add("Content-Type", "application/json")
+ w.WriteHeader(status)
+
+ resp := &ErrorResponse{Errors: make([]string, 0, 1)}
+ if err != nil {
+ resp.Errors = append(resp.Errors, err.Error())
+ }
+
+ enc := json.NewEncoder(w)
+ enc.Encode(resp)
+}
+
+func respondErrorCommon(w http.ResponseWriter, req *logical.Request, resp *logical.Response, err error) bool {
+ statusCode, newErr := logical.RespondErrorCommon(req, resp, err)
+ if newErr == nil && statusCode == 0 {
+ return false
+ }
+
+ respondError(w, statusCode, newErr)
+ return true
+}
+
+func respondOk(w http.ResponseWriter, body interface{}) {
+ w.Header().Add("Content-Type", "application/json")
+
+ if body == nil {
+ w.WriteHeader(http.StatusNoContent)
+ } else {
+ w.WriteHeader(http.StatusOK)
+ enc := json.NewEncoder(w)
+ enc.Encode(body)
+ }
+}
+
+type ErrorResponse struct {
+ Errors []string `json:"errors"`
+}
diff --git a/vendor/github.com/hashicorp/vault/http/handler_test.go b/vendor/github.com/hashicorp/vault/http/handler_test.go
new file mode 100644
index 0000000..149e603
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/handler_test.go
@@ -0,0 +1,245 @@
+package http
+
+import (
+ "encoding/json"
+ "errors"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestHandler_CacheControlNoStore(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ req, err := http.NewRequest("GET", addr+"/v1/sys/mounts", nil)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ req.Header.Set(AuthHeaderName, token)
+ req.Header.Set(WrapTTLHeaderName, "60s")
+
+ client := cleanhttp.DefaultClient()
+ resp, err := client.Do(req)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if resp == nil {
+ t.Fatalf("nil response")
+ }
+
+ actual := resp.Header.Get("Cache-Control")
+
+ if actual == "" {
+ t.Fatalf("missing 'Cache-Control' header entry in response writer")
+ }
+
+ if actual != "no-store" {
+ t.Fatalf("bad: Cache-Control. Expected: 'no-store', Actual: %q", actual)
+ }
+}
+
+// We use this test to verify header auth
+func TestSysMounts_headerAuth(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ req, err := http.NewRequest("GET", addr+"/v1/sys/mounts", nil)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ req.Header.Set(AuthHeaderName, token)
+
+ client := cleanhttp.DefaultClient()
+ resp, err := client.Do(req)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var actual map[string]interface{}
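+ // Note: mount data appears both under "data" and duplicated at the top
+ // level of the response for backwards compatibility, so each mount is
+ // listed twice in the expected map.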
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ },
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+
+ expected["request_id"] = actual["request_id"]
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n", expected, actual)
+ }
+}
+
+// We use this test to verify header auth wrapping
+func TestSysMounts_headerAuth_Wrapped(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ req, err := http.NewRequest("GET", addr+"/v1/sys/mounts", nil)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ req.Header.Set(AuthHeaderName, token)
+ req.Header.Set(WrapTTLHeaderName, "60s")
+
+ client := cleanhttp.DefaultClient()
+ resp, err := client.Do(req)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "request_id": "",
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "data": nil,
+ "wrap_info": map[string]interface{}{
+ "ttl": json.Number("60"),
+ },
+ "warnings": nil,
+ "auth": nil,
+ }
+
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+
+ actualToken, ok := actual["wrap_info"].(map[string]interface{})["token"]
+ if !ok || actualToken == "" {
+ t.Fatal("token missing in wrap info")
+ }
+ expected["wrap_info"].(map[string]interface{})["token"] = actualToken
+
+ actualCreationTime, ok := actual["wrap_info"].(map[string]interface{})["creation_time"]
+ if !ok || actualCreationTime == "" {
+ t.Fatal("creation_time missing in wrap info")
+ }
+ expected["wrap_info"].(map[string]interface{})["creation_time"] = actualCreationTime
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n%T %T", expected, actual, actual["warnings"], actual["data"])
+ }
+}
+
+func TestHandler_sealed(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ core.Seal(token)
+
+ resp, err := http.Get(addr + "/v1/secret/foo")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ testResponseStatus(t, resp, 503)
+}
+
+func TestHandler_error(t *testing.T) {
+ w := httptest.NewRecorder()
+
+ respondError(w, 500, errors.New("Test Error"))
+
+ if w.Code != 500 {
+ t.Fatalf("expected 500, got %d", w.Code)
+ }
+
+ // The code inside of the error should override
+ // the argument to respondError
+ w2 := httptest.NewRecorder()
+ e := logical.CodedError(403, "error text")
+
+ respondError(w2, 500, e)
+
+ if w2.Code != 403 {
+ t.Fatalf("expected 403, got %d", w2.Code)
+ }
+
+ // vault.ErrSealed is a special case
+ w3 := httptest.NewRecorder()
+
+ respondError(w3, 400, consts.ErrSealed)
+
+ if w3.Code != 503 {
+ t.Fatalf("expected 503, got %d", w3.Code)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/help.go b/vendor/github.com/hashicorp/vault/http/help.go
new file mode 100644
index 0000000..f0ca8b1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/help.go
@@ -0,0 +1,43 @@
+package http
+
+import (
+ "net/http"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/vault"
+)
+
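+// wrapHelpHandler short-circuits any request carrying a non-empty "help"
+// query parameter (or the HELP method) into a logical help operation on the
+// requested path; all other requests pass through to the wrapped handler.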
+func wrapHelpHandler(h http.Handler, core *vault.Core) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ // If the help parameter is not blank, then show the help
+ if v := req.URL.Query().Get("help"); v != "" || req.Method == "HELP" {
+ handleHelp(core, w, req)
+ return
+ }
+
+ h.ServeHTTP(w, req)
+ return
+ })
+}
+
+func handleHelp(core *vault.Core, w http.ResponseWriter, req *http.Request) {
+ path, ok := stripPrefix("/v1/", req.URL.Path)
+ if !ok {
+ respondError(w, http.StatusNotFound, nil)
+ return
+ }
+
+ lreq := requestAuth(core, req, &logical.Request{
+ Operation: logical.HelpOperation,
+ Path: path,
+ Connection: getConnection(req),
+ })
+
+ resp, err := core.HandleRequest(lreq)
+ if err != nil {
+ respondErrorCommon(w, lreq, resp, err)
+ return
+ }
+
+ respondOk(w, resp.Data)
+}
diff --git a/vendor/github.com/hashicorp/vault/http/help_test.go b/vendor/github.com/hashicorp/vault/http/help_test.go
new file mode 100644
index 0000000..cc3879b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/help_test.go
@@ -0,0 +1,23 @@
+package http
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestHelp(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpGet(t, token, addr+"/v1/sys/mounts?help=1")
+
+ var actual map[string]interface{}
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if _, ok := actual["help"]; !ok {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/http_test.go b/vendor/github.com/hashicorp/vault/http/http_test.go
new file mode 100644
index 0000000..16e0521
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/http_test.go
@@ -0,0 +1,114 @@
+package http
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/helper/jsonutil"
+)
+
+func testHttpGet(t *testing.T, token string, addr string) *http.Response {
+ t.Logf("Token is %s", token)
+ return testHttpData(t, "GET", token, addr, nil, false)
+}
+
+func testHttpDelete(t *testing.T, token string, addr string) *http.Response {
+ return testHttpData(t, "DELETE", token, addr, nil, false)
+}
+
+// Go 1.8+ clients redirect automatically, which breaks our 307 standby testing
+func testHttpDeleteDisableRedirect(t *testing.T, token string, addr string) *http.Response {
+ return testHttpData(t, "DELETE", token, addr, nil, true)
+}
+
+func testHttpPost(t *testing.T, token string, addr string, body interface{}) *http.Response {
+ return testHttpData(t, "POST", token, addr, body, false)
+}
+
+func testHttpPut(t *testing.T, token string, addr string, body interface{}) *http.Response {
+ return testHttpData(t, "PUT", token, addr, body, false)
+}
+
+// Go 1.8+ clients redirect automatically, which breaks our 307 standby testing
+func testHttpPutDisableRedirect(t *testing.T, token string, addr string, body interface{}) *http.Response {
+ return testHttpData(t, "PUT", token, addr, body, true)
+}
+
+func testHttpData(t *testing.T, method string, token string, addr string, body interface{}, disableRedirect bool) *http.Response {
+ bodyReader := new(bytes.Buffer)
+ if body != nil {
+ enc := json.NewEncoder(bodyReader)
+ if err := enc.Encode(body); err != nil {
+ t.Fatalf("err:%s", err)
+ }
+ }
+
+ req, err := http.NewRequest(method, addr, bodyReader)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+
+ if len(token) != 0 {
+ req.Header.Set("X-Vault-Token", token)
+ }
+
+ client := cleanhttp.DefaultClient()
+ client.Timeout = 60 * time.Second
+
+ // From https://github.com/michiwend/gomusicbrainz/pull/4/files
+ defaultRedirectLimit := 30
+
+ client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ if disableRedirect {
+ return fmt.Errorf("checkRedirect disabled for test")
+ }
+ if len(via) > defaultRedirectLimit {
+ return fmt.Errorf("%d consecutive requests(redirects)", len(via))
+ }
+ if len(via) == 0 {
+ // No redirects
+ return nil
+ }
+ // Copy the token header from the initial request onto each redirect
+ if token := via[0].Header.Get("X-Vault-Token"); len(token) != 0 {
+ req.Header.Set("X-Vault-Token", token)
+ }
+ return nil
+ }
+
+ resp, err := client.Do(req)
+ if err != nil && !strings.Contains(err.Error(), "checkRedirect disabled for test") {
+ t.Fatalf("err: %s", err)
+ }
+
+ return resp
+}
+
+func testResponseStatus(t *testing.T, resp *http.Response, code int) {
+ if resp.StatusCode != code {
+ body := new(bytes.Buffer)
+ io.Copy(body, resp.Body)
+ resp.Body.Close()
+
+ t.Fatalf(
+ "Expected status %d, got %d. Body:\n\n%s",
+ code, resp.StatusCode, body.String())
+ }
+}
+
+func testResponseBody(t *testing.T, resp *http.Response, out interface{}) {
+ defer resp.Body.Close()
+
+ if err := jsonutil.DecodeJSONFromReader(resp.Body, out); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/logical.go b/vendor/github.com/hashicorp/vault/http/logical.go
new file mode 100644
index 0000000..f73e532
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/logical.go
@@ -0,0 +1,263 @@
+package http
+
+import (
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/vault"
+)
+
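+// PrepareRequestFunc is a callback that lets specific endpoints validate or
+// mutate a logical request before it is routed; wrappingVerificationFunc in
+// handler.go is one such callback.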
+type PrepareRequestFunc func(*vault.Core, *logical.Request) error
+
+func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) (*logical.Request, int, error) {
+ // Determine the path...
+ if !strings.HasPrefix(r.URL.Path, "/v1/") {
+ return nil, http.StatusNotFound, nil
+ }
+ path := r.URL.Path[len("/v1/"):]
+ if path == "" {
+ return nil, http.StatusNotFound, nil
+ }
+
+ // Determine the operation
+ var op logical.Operation
+ switch r.Method {
+ case "DELETE":
+ op = logical.DeleteOperation
+ case "GET":
+ op = logical.ReadOperation
+ // A "list" query parameter turns a GET into a list operation
+ queryVals := r.URL.Query()
+ listStr := queryVals.Get("list")
+ if listStr != "" {
+ list, err := strconv.ParseBool(listStr)
+ if err != nil {
+ return nil, http.StatusBadRequest, nil
+ }
+ if list {
+ op = logical.ListOperation
+ }
+ }
+ case "POST", "PUT":
+ op = logical.UpdateOperation
+ case "LIST":
+ op = logical.ListOperation
+ default:
+ return nil, http.StatusMethodNotAllowed, nil
+ }
+
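+ // List operations always target a directory-like path, so normalize
+ // "secret/foo" to "secret/foo/"; TestLogical_ListSuffix covers this.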
+ if op == logical.ListOperation {
+ if !strings.HasSuffix(path, "/") {
+ path += "/"
+ }
+ }
+
+ // Parse the request if we can
+ var data map[string]interface{}
+ if op == logical.UpdateOperation {
+ err := parseRequest(r, w, &data)
+ if err == io.EOF {
+ data = nil
+ err = nil
+ }
+ if err != nil {
+ return nil, http.StatusBadRequest, err
+ }
+ }
+
+ requestID, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, http.StatusBadRequest, errwrap.Wrapf("failed to generate identifier for the request: {{err}}", err)
+ }
+
+ req := requestAuth(core, r, &logical.Request{
+ ID: requestID,
+ Operation: op,
+ Path: path,
+ Data: data,
+ Connection: getConnection(r),
+ Headers: r.Header,
+ })
+
+ req, err = requestWrapInfo(r, req)
+ if err != nil {
+ return nil, http.StatusBadRequest, errwrap.Wrapf("error parsing X-Vault-Wrap-TTL header: {{err}}", err)
+ }
+
+ return req, 0, nil
+}
+
+func handleLogical(core *vault.Core, dataOnly bool, prepareRequestCallback PrepareRequestFunc) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ req, statusCode, err := buildLogicalRequest(core, w, r)
+ if err != nil || statusCode != 0 {
+ respondError(w, statusCode, err)
+ return
+ }
+
+ // Certain endpoints may require changes to the request object. They
+ // will have a callback registered to do the needed operations, so
+ // invoke it before proceeding.
+ if prepareRequestCallback != nil {
+ if err := prepareRequestCallback(core, req); err != nil {
+ respondError(w, http.StatusBadRequest, err)
+ return
+ }
+ }
+
+ // Make the internal request. We attach the connection info
+ // as well in case this is an authentication request that requires
+ // it. Vault core handles stripping this if we need to. This also
+ // handles all error cases; if we hit respondLogical, the request is a
+ // success.
+ resp, ok := request(core, w, r, req)
+ if !ok {
+ return
+ }
+
+ // Build the proper response
+ respondLogical(w, r, req, dataOnly, resp)
+ })
+}
+
+func respondLogical(w http.ResponseWriter, r *http.Request, req *logical.Request, dataOnly bool, resp *logical.Response) {
+ var httpResp *logical.HTTPResponse
+ var ret interface{}
+
+ if resp != nil {
+ if resp.Redirect != "" {
+ // If we have a redirect, redirect! We use a 307 code
+ // because we don't actually know if it's permanent.
+ http.Redirect(w, r, resp.Redirect, 307)
+ return
+ }
+
+ // Check if this is a raw response
+ if _, ok := resp.Data[logical.HTTPStatusCode]; ok {
+ respondRaw(w, r, resp)
+ return
+ }
+
+ if resp.WrapInfo != nil && resp.WrapInfo.Token != "" {
+ httpResp = &logical.HTTPResponse{
+ WrapInfo: &logical.HTTPWrapInfo{
+ Token: resp.WrapInfo.Token,
+ TTL: int(resp.WrapInfo.TTL.Seconds()),
+ CreationTime: resp.WrapInfo.CreationTime.Format(time.RFC3339Nano),
+ WrappedAccessor: resp.WrapInfo.WrappedAccessor,
+ },
+ }
+ } else {
+ httpResp = logical.LogicalResponseToHTTPResponse(resp)
+ httpResp.RequestID = req.ID
+ }
+
+ ret = httpResp
+
+ if dataOnly {
+ injector := logical.HTTPSysInjector{
+ Response: httpResp,
+ }
+ ret = injector
+ }
+ }
+
+ // Respond
+ respondOk(w, ret)
+ return
+}
+
+// respondRaw is used when the response is using HTTPContentType and HTTPRawBody
+// to change the default response handling. This is only used for specific things like
+// returning the CRL information on the PKI backends.
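+//
+// A backend opts in by setting, for example (crlBytes being the raw bytes to
+// serve):
+//
+//	resp.Data[logical.HTTPStatusCode] = 200
+//	resp.Data[logical.HTTPContentType] = "application/pkix-crl"
+//	resp.Data[logical.HTTPRawBody] = crlBytes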
+func respondRaw(w http.ResponseWriter, r *http.Request, resp *logical.Response) {
+ retErr := func(w http.ResponseWriter, err string) {
+ w.Header().Set("X-Vault-Raw-Error", err)
+ w.WriteHeader(http.StatusInternalServerError)
+ w.Write(nil)
+ }
+
+ // Ensure this is never a secret or auth response
+ if resp.Secret != nil || resp.Auth != nil {
+ retErr(w, "raw responses cannot contain secrets or auth")
+ return
+ }
+
+ // Get the status code
+ statusRaw, ok := resp.Data[logical.HTTPStatusCode]
+ if !ok {
+ retErr(w, "no status code given")
+ return
+ }
+ status, ok := statusRaw.(int)
+ if !ok {
+ retErr(w, "cannot decode status code")
+ return
+ }
+
+ nonEmpty := status != http.StatusNoContent
+
+ var contentType string
+ var body []byte
+
+ // Get the content type header; don't require it if the body is empty
+ contentTypeRaw, ok := resp.Data[logical.HTTPContentType]
+ if !ok && !nonEmpty {
+ retErr(w, "no content type given")
+ return
+ }
+ if ok {
+ contentType, ok = contentTypeRaw.(string)
+ if !ok {
+ retErr(w, "cannot decode content type")
+ return
+ }
+ }
+
+ if nonEmpty {
+ // Get the body
+ bodyRaw, ok := resp.Data[logical.HTTPRawBody]
+ if !ok {
+ retErr(w, "no body given")
+ return
+ }
+ body, ok = bodyRaw.([]byte)
+ if !ok {
+ retErr(w, "cannot decode body")
+ return
+ }
+ }
+
+ // Write the response
+ if contentType != "" {
+ w.Header().Set("Content-Type", contentType)
+ }
+
+ w.WriteHeader(status)
+ w.Write(body)
+}
+
+// getConnection is used to format the connection information for
+// attaching to a logical request
+func getConnection(r *http.Request) (connection *logical.Connection) {
+ var remoteAddr string
+
+ remoteAddr, _, err := net.SplitHostPort(r.RemoteAddr)
+ if err != nil {
+ remoteAddr = ""
+ }
+
+ connection = &logical.Connection{
+ RemoteAddr: remoteAddr,
+ ConnState: r.TLS,
+ }
+ return
+}
diff --git a/vendor/github.com/hashicorp/vault/http/logical_test.go b/vendor/github.com/hashicorp/vault/http/logical_test.go
new file mode 100644
index 0000000..bbbd892
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/logical_test.go
@@ -0,0 +1,296 @@
+package http
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestLogical(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ // WRITE
+ resp := testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{
+ "data": "bar",
+ })
+ testResponseStatus(t, resp, 204)
+
+ // READ
+ // Bad token should return a 403
+ resp = testHttpGet(t, token+"bad", addr+"/v1/secret/foo")
+ testResponseStatus(t, resp, 403)
+
+ resp = testHttpGet(t, token, addr+"/v1/secret/foo")
+ var actual map[string]interface{}
+ var nilWarnings interface{}
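+ // The expected lease is the test core's default lease TTL of 32 days,
+ // reported in seconds.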
+ expected := map[string]interface{}{
+ "renewable": false,
+ "lease_duration": json.Number(strconv.Itoa(int((32 * 24 * time.Hour) / time.Second))),
+ "data": map[string]interface{}{
+ "data": "bar",
+ },
+ "auth": nil,
+ "wrap_info": nil,
+ "warnings": nilWarnings,
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ delete(actual, "lease_id")
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad:\nactual:\n%#v\nexpected:\n%#v", actual, expected)
+ }
+
+ // DELETE
+ resp = testHttpDelete(t, token, addr+"/v1/secret/foo")
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/secret/foo")
+ testResponseStatus(t, resp, 404)
+}
+
+func TestLogical_noExist(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpGet(t, token, addr+"/v1/secret/foo")
+ testResponseStatus(t, resp, 404)
+}
+
+func TestLogical_StandbyRedirect(t *testing.T) {
+ ln1, addr1 := TestListener(t)
+ defer ln1.Close()
+ ln2, addr2 := TestListener(t)
+ defer ln2.Close()
+
+ // Create an HA Vault
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ inmha := physical.NewInmemHA(logger)
+ conf := &vault.CoreConfig{
+ Physical: inmha,
+ HAPhysical: inmha,
+ RedirectAddr: addr1,
+ DisableMlock: true,
+ }
+ core1, err := vault.NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ keys, root := vault.TestCoreInit(t, core1)
+ for _, key := range keys {
+ if _, err := core1.Unseal(vault.TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Attempt to fix raciness in this test by giving the first core a chance
+ // to grab the lock
+ time.Sleep(2 * time.Second)
+
+ // Create a second HA Vault
+ conf2 := &vault.CoreConfig{
+ Physical: inmha,
+ HAPhysical: inmha,
+ RedirectAddr: addr2,
+ DisableMlock: true,
+ }
+ core2, err := vault.NewCore(conf2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for _, key := range keys {
+ if _, err := core2.Unseal(vault.TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ TestServerWithListener(t, ln1, addr1, core1)
+ TestServerWithListener(t, ln2, addr2, core2)
+ TestServerAuth(t, addr1, root)
+
+ // WRITE to STANDBY
+ resp := testHttpPutDisableRedirect(t, root, addr2+"/v1/secret/foo", map[string]interface{}{
+ "data": "bar",
+ })
+ logger.Trace("307 test one starting")
+ testResponseStatus(t, resp, 307)
+ logger.Trace("307 test one stopping")
+
+ //// READ to standby
+ resp = testHttpGet(t, root, addr2+"/v1/auth/token/lookup-self")
+ var actual map[string]interface{}
+ var nilWarnings interface{}
+ expected := map[string]interface{}{
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "data": map[string]interface{}{
+ "meta": nil,
+ "num_uses": json.Number("0"),
+ "path": "auth/token/root",
+ "policies": []interface{}{"root"},
+ "display_name": "root",
+ "orphan": true,
+ "id": root,
+ "ttl": json.Number("0"),
+ "creation_ttl": json.Number("0"),
+ "explicit_max_ttl": json.Number("0"),
+ "expire_time": nil,
+ },
+ "warnings": nilWarnings,
+ "wrap_info": nil,
+ "auth": nil,
+ }
+
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ actualDataMap := actual["data"].(map[string]interface{})
+ delete(actualDataMap, "creation_time")
+ delete(actualDataMap, "accessor")
+ actual["data"] = actualDataMap
+ expected["request_id"] = actual["request_id"]
+ delete(actual, "lease_id")
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: got %#v; expected %#v", actual, expected)
+ }
+
+ //// DELETE to standby
+ resp = testHttpDeleteDisableRedirect(t, root, addr2+"/v1/secret/foo")
+ logger.Trace("307 test two starting")
+ testResponseStatus(t, resp, 307)
+ logger.Trace("307 test two stopping")
+}
+
+func TestLogical_CreateToken(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ // WRITE
+ resp := testHttpPut(t, token, addr+"/v1/auth/token/create", map[string]interface{}{
+ "data": "bar",
+ })
+
+ var actual map[string]interface{}
+ var nilWarnings interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "data": nil,
+ "wrap_info": nil,
+ "auth": map[string]interface{}{
+ "policies": []interface{}{"root"},
+ "metadata": nil,
+ "lease_duration": json.Number("0"),
+ "renewable": false,
+ },
+ "warnings": nilWarnings,
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ delete(actual["auth"].(map[string]interface{}), "client_token")
+ delete(actual["auth"].(map[string]interface{}), "accessor")
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad:\nexpected:\n%#v\nactual:\n%#v", expected, actual)
+ }
+}
+
+func TestLogical_RawHTTP(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
+ "type": "http",
+ })
+ testResponseStatus(t, resp, 204)
+
+ // Get the raw response
+ resp = testHttpGet(t, token, addr+"/v1/foo/raw")
+ testResponseStatus(t, resp, 200)
+
+ // Test the headers
+ if resp.Header.Get("Content-Type") != "plain/text" {
+ t.Fatalf("Bad: %#v", resp.Header)
+ }
+
+ // Get the body
+ body := new(bytes.Buffer)
+ io.Copy(body, resp.Body)
+ if body.String() != "hello world" {
+ t.Fatalf("Bad: %s", body.String())
+ }
+}
+
+func TestLogical_RequestSizeLimit(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ // Write a very large object, should fail
+ resp := testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{
+ "data": make([]byte, MaxRequestSize),
+ })
+ testResponseStatus(t, resp, 413)
+}
+
+func TestLogical_ListSuffix(t *testing.T) {
+ core, _, _ := vault.TestCoreUnsealed(t)
+ req, _ := http.NewRequest("GET", "http://127.0.0.1:8200/v1/secret/foo", nil)
+ lreq, status, err := buildLogicalRequest(core, nil, req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if status != 0 {
+ t.Fatalf("got status %d", status)
+ }
+ if strings.HasSuffix(lreq.Path, "/") {
+ t.Fatal("trailing slash found on path")
+ }
+
+ req, _ = http.NewRequest("GET", "http://127.0.0.1:8200/v1/secret/foo?list=true", nil)
+ lreq, status, err = buildLogicalRequest(core, nil, req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if status != 0 {
+ t.Fatalf("got status %d", status)
+ }
+ if !strings.HasSuffix(lreq.Path, "/") {
+ t.Fatal("trailing slash not found on path")
+ }
+
+ req, _ = http.NewRequest("LIST", "http://127.0.0.1:8200/v1/secret/foo", nil)
+ lreq, status, err = buildLogicalRequest(core, nil, req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if status != 0 {
+ t.Fatalf("got status %d", status)
+ }
+ if !strings.HasSuffix(lreq.Path, "/") {
+ t.Fatal("trailing slash not found on path")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_audit_test.go b/vendor/github.com/hashicorp/vault/http/sys_audit_test.go
new file mode 100644
index 0000000..58873bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_audit_test.go
@@ -0,0 +1,132 @@
+package http
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysAudit(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/audit/noop", map[string]interface{}{
+ "type": "noop",
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/audit")
+
+ var actual map[string]interface{}
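+ // As with sys/mounts, the audit listing is duplicated at the top level
+ // of the response for backwards compatibility.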
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "noop/": map[string]interface{}{
+ "path": "noop/",
+ "type": "noop",
+ "description": "",
+ "options": map[string]interface{}{},
+ "local": false,
+ },
+ },
+ "noop/": map[string]interface{}{
+ "path": "noop/",
+ "type": "noop",
+ "description": "",
+ "options": map[string]interface{}{},
+ "local": false,
+ },
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+
+ expected["request_id"] = actual["request_id"]
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:\n%#v actual:\n%#v\n", expected, actual)
+ }
+}
+
+func TestSysDisableAudit(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/audit/foo", map[string]interface{}{
+ "type": "noop",
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpDelete(t, token, addr+"/v1/sys/audit/foo")
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/audit")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{},
+ }
+
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+
+ expected["request_id"] = actual["request_id"]
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad:\nactual: %#v\nexpected: %#v\n", actual, expected)
+ }
+}
+
+func TestSysAuditHash(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/audit/noop", map[string]interface{}{
+ "type": "noop",
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpPost(t, token, addr+"/v1/sys/audit-hash/noop", map[string]interface{}{
+ "input": "bar",
+ })
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "hash": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
+ },
+ "hash": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+
+ expected["request_id"] = actual["request_id"]
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:\n%#v\n, got:\n%#v\n", expected, actual)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_auth_test.go b/vendor/github.com/hashicorp/vault/http/sys_auth_test.go
new file mode 100644
index 0000000..9e19391
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_auth_test.go
@@ -0,0 +1,183 @@
+package http
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysAuth(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpGet(t, token, addr+"/v1/sys/auth")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "token/": map[string]interface{}{
+ "description": "token based credentials",
+ "type": "token",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ },
+ "local": false,
+ },
+ },
+ "token/": map[string]interface{}{
+ "description": "token based credentials",
+ "type": "token",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ },
+ "local": false,
+ },
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+
+ expected["request_id"] = actual["request_id"]
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+ }
+}
+
+func TestSysEnableAuth(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/auth/foo", map[string]interface{}{
+ "type": "noop",
+ "description": "foo",
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/auth")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "foo/": map[string]interface{}{
+ "description": "foo",
+ "type": "noop",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ },
+ "local": false,
+ },
+ "token/": map[string]interface{}{
+ "description": "token based credentials",
+ "type": "token",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ },
+ "local": false,
+ },
+ },
+ "foo/": map[string]interface{}{
+ "description": "foo",
+ "type": "noop",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ },
+ "local": false,
+ },
+ "token/": map[string]interface{}{
+ "description": "token based credentials",
+ "type": "token",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ },
+ "local": false,
+ },
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+
+ expected["request_id"] = actual["request_id"]
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+ }
+}
+
+func TestSysDisableAuth(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/auth/foo", map[string]interface{}{
+ "type": "noop",
+ "description": "foo",
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpDelete(t, token, addr+"/v1/sys/auth/foo")
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/auth")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "token/": map[string]interface{}{
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ },
+ "description": "token based credentials",
+ "type": "token",
+ "local": false,
+ },
+ },
+ "token/": map[string]interface{}{
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ },
+ "description": "token based credentials",
+ "type": "token",
+ "local": false,
+ },
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+
+ expected["request_id"] = actual["request_id"]
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_generate_root.go b/vendor/github.com/hashicorp/vault/http/sys_generate_root.go
new file mode 100644
index 0000000..3697f80
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_generate_root.go
@@ -0,0 +1,182 @@
+package http
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+func handleSysGenerateRootAttempt(core *vault.Core) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.Method {
+ case "GET":
+ handleSysGenerateRootAttemptGet(core, w, r)
+ case "POST", "PUT":
+ handleSysGenerateRootAttemptPut(core, w, r)
+ case "DELETE":
+ handleSysGenerateRootAttemptDelete(core, w, r)
+ default:
+ respondError(w, http.StatusMethodNotAllowed, nil)
+ }
+ })
+}
+
+func handleSysGenerateRootAttemptGet(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+ // Get the current seal configuration
+ barrierConfig, err := core.SealAccess().BarrierConfig()
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+ if barrierConfig == nil {
+ respondError(w, http.StatusBadRequest, fmt.Errorf(
+ "server is not yet initialized"))
+ return
+ }
+
+ sealConfig := barrierConfig
+ if core.SealAccess().RecoveryKeySupported() {
+ sealConfig, err = core.SealAccess().RecoveryConfig()
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+ }
+
+ // Get the generation configuration
+ generationConfig, err := core.GenerateRootConfiguration()
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+
+ // Get the progress
+ progress, err := core.GenerateRootProgress()
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+
+ // Format the status
+ status := &GenerateRootStatusResponse{
+ Started: false,
+ Progress: progress,
+ Required: sealConfig.SecretThreshold,
+ Complete: false,
+ }
+ if generationConfig != nil {
+ status.Nonce = generationConfig.Nonce
+ status.Started = true
+ status.PGPFingerprint = generationConfig.PGPFingerprint
+ }
+
+ respondOk(w, status)
+}
+
+func handleSysGenerateRootAttemptPut(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+ // Parse the request
+ var req GenerateRootInitRequest
+ if err := parseRequest(r, w, &req); err != nil {
+ respondError(w, http.StatusBadRequest, err)
+ return
+ }
+
+ if len(req.OTP) > 0 && len(req.PGPKey) > 0 {
+ respondError(w, http.StatusBadRequest, fmt.Errorf("only one of \"otp\" and \"pgp_key\" may be specified"))
+ return
+ }
+
+ // Initialize the root generation attempt
+ err := core.GenerateRootInit(req.OTP, req.PGPKey)
+ if err != nil {
+ respondError(w, http.StatusBadRequest, err)
+ return
+ }
+
+ handleSysGenerateRootAttemptGet(core, w, r)
+}
+
+func handleSysGenerateRootAttemptDelete(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+ err := core.GenerateRootCancel()
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+ respondOk(w, nil)
+}
+
+func handleSysGenerateRootUpdate(core *vault.Core) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Parse the request
+ var req GenerateRootUpdateRequest
+ if err := parseRequest(r, w, &req); err != nil {
+ respondError(w, http.StatusBadRequest, err)
+ return
+ }
+ if req.Key == "" {
+ respondError(
+ w, http.StatusBadRequest,
+ errors.New("'key' must be specified in request body as JSON"))
+ return
+ }
+
+ // Decode the key, which is base64 or hex encoded
+ min, max := core.BarrierKeyLength()
+ key, err := hex.DecodeString(req.Key)
+ // We check min and max here so that a string which is valid hex but
+ // decodes to a key of the wrong length is rejected as hex and is
+ // base64 decoded instead
+ if err != nil || len(key) < min || len(key) > max {
+ key, err = base64.StdEncoding.DecodeString(req.Key)
+ if err != nil {
+ respondError(
+ w, http.StatusBadRequest,
+ errors.New("'key' must be a valid hex or base64 string"))
+ return
+ }
+ }
+
+ // Use the key to make progress on root generation
+ result, err := core.GenerateRootUpdate(key, req.Nonce)
+ if err != nil {
+ respondError(w, http.StatusBadRequest, err)
+ return
+ }
+
+ resp := &GenerateRootStatusResponse{
+ Complete: result.Progress == result.Required,
+ Nonce: req.Nonce,
+ Progress: result.Progress,
+ Required: result.Required,
+ Started: true,
+ EncodedRootToken: result.EncodedRootToken,
+ PGPFingerprint: result.PGPFingerprint,
+ }
+
+ respondOk(w, resp)
+ })
+}
+
+type GenerateRootInitRequest struct {
+ OTP string `json:"otp"`
+ PGPKey string `json:"pgp_key"`
+}
+
+type GenerateRootStatusResponse struct {
+ Nonce string `json:"nonce"`
+ Started bool `json:"started"`
+ Progress int `json:"progress"`
+ Required int `json:"required"`
+ Complete bool `json:"complete"`
+ EncodedRootToken string `json:"encoded_root_token"`
+ PGPFingerprint string `json:"pgp_fingerprint"`
+}
+
+type GenerateRootUpdateRequest struct {
+ Nonce string
+ Key string
+}
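+
+// Illustrative flow (a sketch, not upstream documentation): root generation
+// starts with an attempt carrying either an OTP or a PGP key, then consumes
+// unseal key shares until the threshold is met, e.g.
+//
+// PUT /v1/sys/generate-root/attempt {"otp": "<base64 OTP>"}
+// PUT /v1/sys/generate-root/update {"nonce": "<nonce>", "key": "<hex or base64 share>"}
+//
+// Once the response reports complete, encoded_root_token is returned and the
+// caller decodes it with the OTP (or the PGP private key).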
diff --git a/vendor/github.com/hashicorp/vault/http/sys_generate_root_test.go b/vendor/github.com/hashicorp/vault/http/sys_generate_root_test.go
new file mode 100644
index 0000000..41cb2a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_generate_root_test.go
@@ -0,0 +1,418 @@
+package http
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/pgpkeys"
+ "github.com/hashicorp/vault/helper/xor"
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysGenerateRootAttempt_Status(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp, err := http.Get(addr + "/v1/sys/generate-root/attempt")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "started": false,
+ "progress": json.Number("0"),
+ "required": json.Number("3"),
+ "complete": false,
+ "encoded_root_token": "",
+ "pgp_fingerprint": "",
+ "nonce": "",
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+}
+
+func TestSysGenerateRootAttempt_Setup_OTP(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ otpBytes, err := vault.GenerateRandBytes(16)
+ if err != nil {
+ t.Fatal(err)
+ }
+ otp := base64.StdEncoding.EncodeToString(otpBytes)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
+ "otp": otp,
+ })
+ testResponseStatus(t, resp, 200)
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "started": true,
+ "progress": json.Number("0"),
+ "required": json.Number("3"),
+ "complete": false,
+ "encoded_root_token": "",
+ "pgp_fingerprint": "",
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if actual["nonce"].(string) == "" {
+ t.Fatalf("nonce was empty")
+ }
+ expected["nonce"] = actual["nonce"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/generate-root/attempt")
+
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "started": true,
+ "progress": json.Number("0"),
+ "required": json.Number("3"),
+ "complete": false,
+ "encoded_root_token": "",
+ "pgp_fingerprint": "",
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if actual["nonce"].(string) == "" {
+ t.Fatalf("nonce was empty")
+ }
+ expected["nonce"] = actual["nonce"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+}
+
+func TestSysGenerateRootAttempt_Setup_PGP(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
+ "pgp_key": pgpkeys.TestPubKey1,
+ })
+ testResponseStatus(t, resp, 200)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/generate-root/attempt")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "started": true,
+ "progress": json.Number("0"),
+ "required": json.Number("3"),
+ "complete": false,
+ "encoded_root_token": "",
+ "pgp_fingerprint": "816938b8a29146fbe245dd29e7cbaf8e011db793",
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if actual["nonce"].(string) == "" {
+ t.Fatalf("nonce was empty")
+ }
+ expected["nonce"] = actual["nonce"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+}
+
+func TestSysGenerateRootAttempt_Cancel(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ otpBytes, err := vault.GenerateRandBytes(16)
+ if err != nil {
+ t.Fatal(err)
+ }
+ otp := base64.StdEncoding.EncodeToString(otpBytes)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
+ "otp": otp,
+ })
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "started": true,
+ "progress": json.Number("0"),
+ "required": json.Number("3"),
+ "complete": false,
+ "encoded_root_token": "",
+ "pgp_fingerprint": "",
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if actual["nonce"].(string) == "" {
+ t.Fatalf("nonce was empty")
+ }
+ expected["nonce"] = actual["nonce"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+
+ resp = testHttpDelete(t, token, addr+"/v1/sys/generate-root/attempt")
+ testResponseStatus(t, resp, 204)
+
+ resp, err = http.Get(addr + "/v1/sys/generate-root/attempt")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "started": false,
+ "progress": json.Number("0"),
+ "required": json.Number("3"),
+ "complete": false,
+ "encoded_root_token": "",
+ "pgp_fingerprint": "",
+ "nonce": "",
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+}
+
+func TestSysGenerateRoot_badKey(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ otpBytes, err := vault.GenerateRandBytes(16)
+ if err != nil {
+ t.Fatal(err)
+ }
+ otp := base64.StdEncoding.EncodeToString(otpBytes)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/update", map[string]interface{}{
+ "key": "0123",
+ "otp": otp,
+ })
+ testResponseStatus(t, resp, 400)
+}
+
+func TestSysGenerateRoot_ReAttemptUpdate(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ otpBytes, err := vault.GenerateRandBytes(16)
+ if err != nil {
+ t.Fatal(err)
+ }
+ otp := base64.StdEncoding.EncodeToString(otpBytes)
+ resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
+ "otp": otp,
+ })
+ testResponseStatus(t, resp, 200)
+
+ resp = testHttpDelete(t, token, addr+"/v1/sys/generate-root/attempt")
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
+ "pgp_key": pgpkeys.TestPubKey1,
+ })
+
+ testResponseStatus(t, resp, 200)
+}
+
+func TestSysGenerateRoot_Update_OTP(t *testing.T) {
+ core, keys, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ otpBytes, err := vault.GenerateRandBytes(16)
+ if err != nil {
+ t.Fatal(err)
+ }
+ otp := base64.StdEncoding.EncodeToString(otpBytes)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
+ "otp": otp,
+ })
+ var rootGenerationStatus map[string]interface{}
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &rootGenerationStatus)
+
+ var actual map[string]interface{}
+ var expected map[string]interface{}
+ for i, key := range keys {
+ resp = testHttpPut(t, token, addr+"/v1/sys/generate-root/update", map[string]interface{}{
+ "nonce": rootGenerationStatus["nonce"].(string),
+ "key": hex.EncodeToString(key),
+ })
+
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "complete": false,
+ "nonce": rootGenerationStatus["nonce"].(string),
+ "progress": json.Number(fmt.Sprintf("%d", i+1)),
+ "required": json.Number(fmt.Sprintf("%d", len(keys))),
+ "started": true,
+ "pgp_fingerprint": "",
+ }
+ if i+1 == len(keys) {
+ expected["complete"] = true
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ }
+
+ if actual["encoded_root_token"] == nil {
+ t.Fatalf("no encoded root token found in response")
+ }
+ expected["encoded_root_token"] = actual["encoded_root_token"]
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+
+ decodedToken, err := xor.XORBase64(otp, actual["encoded_root_token"].(string))
+ if err != nil {
+ t.Fatal(err)
+ }
+ newRootToken, err := uuid.FormatUUID(decodedToken)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "id": newRootToken,
+ "display_name": "root",
+ "meta": interface{}(nil),
+ "num_uses": json.Number("0"),
+ "policies": []interface{}{"root"},
+ "orphan": true,
+ "creation_ttl": json.Number("0"),
+ "ttl": json.Number("0"),
+ "path": "auth/token/root",
+ "explicit_max_ttl": json.Number("0"),
+ "expire_time": nil,
+ }
+
+ resp = testHttpGet(t, newRootToken, addr+"/v1/auth/token/lookup-self")
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+
+ expected["creation_time"] = actual["data"].(map[string]interface{})["creation_time"]
+ expected["accessor"] = actual["data"].(map[string]interface{})["accessor"]
+
+ if !reflect.DeepEqual(actual["data"], expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual["data"])
+ }
+}
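+
+// decodeRootToken is an illustrative helper (an addition of this sketch, not
+// part of the upstream suite) that mirrors the decoding done above: the
+// encoded root token is XORed with the OTP and the result is formatted as a
+// UUID to recover the new root token.
+func decodeRootToken(encodedRootToken, otp string) (string, error) {
+ // XORBase64 base64-decodes both inputs and XORs the resulting bytes.
+ decoded, err := xor.XORBase64(otp, encodedRootToken)
+ if err != nil {
+ return "", err
+ }
+ // The XORed bytes are the raw token ID; format them as the UUID string
+ // Vault uses for token IDs.
+ return uuid.FormatUUID(decoded)
+}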
+
+func TestSysGenerateRoot_Update_PGP(t *testing.T) {
+ core, keys, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
+ "pgp_key": pgpkeys.TestPubKey1,
+ })
+ testResponseStatus(t, resp, 200)
+
+ // We need to get the nonce first before we update
+ resp, err := http.Get(addr + "/v1/sys/generate-root/attempt")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ var rootGenerationStatus map[string]interface{}
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &rootGenerationStatus)
+
+ var actual map[string]interface{}
+ var expected map[string]interface{}
+ for i, key := range keys {
+ resp = testHttpPut(t, token, addr+"/v1/sys/generate-root/update", map[string]interface{}{
+ "nonce": rootGenerationStatus["nonce"].(string),
+ "key": hex.EncodeToString(key),
+ })
+
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "complete": false,
+ "nonce": rootGenerationStatus["nonce"].(string),
+ "progress": json.Number(fmt.Sprintf("%d", i+1)),
+ "required": json.Number(fmt.Sprintf("%d", len(keys))),
+ "started": true,
+ "pgp_fingerprint": "816938b8a29146fbe245dd29e7cbaf8e011db793",
+ }
+ if i+1 == len(keys) {
+ expected["complete"] = true
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ }
+
+ if actual["encoded_root_token"] == nil {
+ t.Fatalf("no encoded root token found in response")
+ }
+ expected["encoded_root_token"] = actual["encoded_root_token"]
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+
+ decodedTokenBuf, err := pgpkeys.DecryptBytes(actual["encoded_root_token"].(string), pgpkeys.TestPrivKey1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if decodedTokenBuf == nil {
+ t.Fatal("decoded root token buffer is nil")
+ }
+
+ newRootToken := decodedTokenBuf.String()
+
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "id": newRootToken,
+ "display_name": "root",
+ "meta": interface{}(nil),
+ "num_uses": json.Number("0"),
+ "policies": []interface{}{"root"},
+ "orphan": true,
+ "creation_ttl": json.Number("0"),
+ "ttl": json.Number("0"),
+ "path": "auth/token/root",
+ "explicit_max_ttl": json.Number("0"),
+ "expire_time": nil,
+ }
+
+ resp = testHttpGet(t, newRootToken, addr+"/v1/auth/token/lookup-self")
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+
+ expected["creation_time"] = actual["data"].(map[string]interface{})["creation_time"]
+ expected["accessor"] = actual["data"].(map[string]interface{})["accessor"]
+
+ if !reflect.DeepEqual(actual["data"], expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual["data"])
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_health.go b/vendor/github.com/hashicorp/vault/http/sys_health.go
new file mode 100644
index 0000000..40797be
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_health.go
@@ -0,0 +1,158 @@
+package http
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/hashicorp/vault/vault"
+ "github.com/hashicorp/vault/version"
+)
+
+func handleSysHealth(core *vault.Core) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.Method {
+ case "GET":
+ handleSysHealthGet(core, w, r)
+ case "HEAD":
+ handleSysHealthHead(core, w, r)
+ default:
+ respondError(w, http.StatusMethodNotAllowed, nil)
+ }
+ })
+}
+
+func fetchStatusCode(r *http.Request, field string) (int, bool, bool) {
+ var err error
+ statusCode := http.StatusOK
+ if statusCodeStr, statusCodeOk := r.URL.Query()[field]; statusCodeOk {
+ // Check the length before indexing so an empty value cannot panic
+ if len(statusCodeStr) < 1 {
+ return http.StatusBadRequest, false, false
+ }
+ statusCode, err = strconv.Atoi(statusCodeStr[0])
+ if err != nil {
+ return http.StatusBadRequest, false, false
+ }
+ return statusCode, true, true
+ }
+ return statusCode, false, true
+}
+
+func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+ code, body, err := getSysHealth(core, r)
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, nil)
+ return
+ }
+
+ if body == nil {
+ respondError(w, code, nil)
+ return
+ }
+
+ w.Header().Add("Content-Type", "application/json")
+ w.WriteHeader(code)
+
+ // Generate the response
+ enc := json.NewEncoder(w)
+ enc.Encode(body)
+}
+
+func handleSysHealthHead(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+ code, body, err := getSysHealth(core, r)
+ if err != nil {
+ code = http.StatusInternalServerError
+ }
+
+ if body != nil {
+ w.Header().Add("Content-Type", "application/json")
+ }
+ w.WriteHeader(code)
+}
+
+func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, error) {
+ // Check if being a standby is allowed for the purpose of a 200 OK
+ _, standbyOK := r.URL.Query()["standbyok"]
+
+ uninitCode := http.StatusNotImplemented
+ if code, found, ok := fetchStatusCode(r, "uninitcode"); !ok {
+ return http.StatusBadRequest, nil, nil
+ } else if found {
+ uninitCode = code
+ }
+
+ sealedCode := http.StatusServiceUnavailable
+ if code, found, ok := fetchStatusCode(r, "sealedcode"); !ok {
+ return http.StatusBadRequest, nil, nil
+ } else if found {
+ sealedCode = code
+ }
+
+ standbyCode := http.StatusTooManyRequests // Consul warning code
+ if code, found, ok := fetchStatusCode(r, "standbycode"); !ok {
+ return http.StatusBadRequest, nil, nil
+ } else if found {
+ standbyCode = code
+ }
+
+ activeCode := http.StatusOK
+ if code, found, ok := fetchStatusCode(r, "activecode"); !ok {
+ return http.StatusBadRequest, nil, nil
+ } else if found {
+ activeCode = code
+ }
+
+ // Check system status
+ sealed, _ := core.Sealed()
+ standby, _ := core.Standby()
+ init, err := core.Initialized()
+ if err != nil {
+ return http.StatusInternalServerError, nil, err
+ }
+
+ // Determine the status code
+ code := activeCode
+ switch {
+ case !init:
+ code = uninitCode
+ case sealed:
+ code = sealedCode
+ case !standbyOK && standby:
+ code = standbyCode
+ }
+
+ // Fetch the local cluster name and identifier
+ var clusterName, clusterID string
+ if !sealed {
+ cluster, err := core.Cluster()
+ if err != nil {
+ return http.StatusInternalServerError, nil, err
+ }
+ if cluster == nil {
+ return http.StatusInternalServerError, nil, fmt.Errorf("failed to fetch cluster details")
+ }
+ clusterName = cluster.Name
+ clusterID = cluster.ID
+ }
+
+ // Format the body
+ body := &HealthResponse{
+ Initialized: init,
+ Sealed: sealed,
+ Standby: standby,
+ ServerTimeUTC: time.Now().UTC().Unix(),
+ Version: version.GetVersion().VersionNumber(),
+ ClusterName: clusterName,
+ ClusterID: clusterID,
+ }
+ return code, body, nil
+}
+
+type HealthResponse struct {
+ Initialized bool `json:"initialized"`
+ Sealed bool `json:"sealed"`
+ Standby bool `json:"standby"`
+ ServerTimeUTC int64 `json:"server_time_utc"`
+ Version string `json:"version"`
+ ClusterName string `json:"cluster_name,omitempty"`
+ ClusterID string `json:"cluster_id,omitempty"`
+}
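+
+// Illustrative usage (a sketch, not upstream documentation): health checks
+// key off the status code, and each state's code can be overridden through a
+// query parameter:
+//
+// GET /v1/sys/health -> 200 active, 429 standby, 503 sealed, 501 uninitialized
+// GET /v1/sys/health?standbyok=true -> standbys also return the active code
+// GET /v1/sys/health?sealedcode=200 -> sealed nodes return 200 instead of 503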
diff --git a/vendor/github.com/hashicorp/vault/http/sys_health_test.go b/vendor/github.com/hashicorp/vault/http/sys_health_test.go
new file mode 100644
index 0000000..056ea5f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_health_test.go
@@ -0,0 +1,255 @@
+package http
+
+import (
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysHealth_get(t *testing.T) {
+ core := vault.TestCore(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ resp, err := http.Get(addr + "/v1/sys/health")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "initialized": false,
+ "sealed": true,
+ "standby": true,
+ }
+ testResponseStatus(t, resp, 501)
+ testResponseBody(t, resp, &actual)
+ expected["server_time_utc"] = actual["server_time_utc"]
+ expected["version"] = actual["version"]
+ if actual["cluster_name"] == nil {
+ delete(expected, "cluster_name")
+ } else {
+ expected["cluster_name"] = actual["cluster_name"]
+ }
+ if actual["cluster_id"] == nil {
+ delete(expected, "cluster_id")
+ } else {
+ expected["cluster_id"] = actual["cluster_id"]
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+ }
+
+ keys, _ := vault.TestCoreInit(t, core)
+ resp, err = http.Get(addr + "/v1/sys/health")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "initialized": true,
+ "sealed": true,
+ "standby": true,
+ }
+ testResponseStatus(t, resp, 503)
+ testResponseBody(t, resp, &actual)
+ expected["server_time_utc"] = actual["server_time_utc"]
+ expected["version"] = actual["version"]
+ if actual["cluster_name"] == nil {
+ delete(expected, "cluster_name")
+ } else {
+ expected["cluster_name"] = actual["cluster_name"]
+ }
+ if actual["cluster_id"] == nil {
+ delete(expected, "cluster_id")
+ } else {
+ expected["cluster_id"] = actual["cluster_id"]
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+ }
+
+ for _, key := range keys {
+ if _, err := vault.TestCoreUnseal(core, vault.TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+ resp, err = http.Get(addr + "/v1/sys/health")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "initialized": true,
+ "sealed": false,
+ "standby": false,
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["server_time_utc"] = actual["server_time_utc"]
+ expected["version"] = actual["version"]
+ if actual["cluster_name"] == nil {
+ delete(expected, "cluster_name")
+ } else {
+ expected["cluster_name"] = actual["cluster_name"]
+ }
+ if actual["cluster_id"] == nil {
+ delete(expected, "cluster_id")
+ } else {
+ expected["cluster_id"] = actual["cluster_id"]
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+ }
+}
+
+func TestSysHealth_customcodes(t *testing.T) {
+ core := vault.TestCore(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ queryurl, err := url.Parse(addr + "/v1/sys/health?uninitcode=581&sealedcode=523&activecode=202")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ resp, err := http.Get(queryurl.String())
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "initialized": false,
+ "sealed": true,
+ "standby": true,
+ }
+ testResponseStatus(t, resp, 581)
+ testResponseBody(t, resp, &actual)
+
+ expected["server_time_utc"] = actual["server_time_utc"]
+ expected["version"] = actual["version"]
+ if actual["cluster_name"] == nil {
+ delete(expected, "cluster_name")
+ } else {
+ expected["cluster_name"] = actual["cluster_name"]
+ }
+ if actual["cluster_id"] == nil {
+ delete(expected, "cluster_id")
+ } else {
+ expected["cluster_id"] = actual["cluster_id"]
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+ }
+
+ keys, _ := vault.TestCoreInit(t, core)
+ resp, err = http.Get(queryurl.String())
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "initialized": true,
+ "sealed": true,
+ "standby": true,
+ }
+ testResponseStatus(t, resp, 523)
+ testResponseBody(t, resp, &actual)
+
+ expected["server_time_utc"] = actual["server_time_utc"]
+ expected["version"] = actual["version"]
+ if actual["cluster_name"] == nil {
+ delete(expected, "cluster_name")
+ } else {
+ expected["cluster_name"] = actual["cluster_name"]
+ }
+ if actual["cluster_id"] == nil {
+ delete(expected, "cluster_id")
+ } else {
+ expected["cluster_id"] = actual["cluster_id"]
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+ }
+
+ for _, key := range keys {
+ if _, err := vault.TestCoreUnseal(core, vault.TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+ resp, err = http.Get(queryurl.String())
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "initialized": true,
+ "sealed": false,
+ "standby": false,
+ }
+ testResponseStatus(t, resp, 202)
+ testResponseBody(t, resp, &actual)
+ expected["server_time_utc"] = actual["server_time_utc"]
+ expected["version"] = actual["version"]
+ if actual["cluster_name"] == nil {
+ delete(expected, "cluster_name")
+ } else {
+ expected["cluster_name"] = actual["cluster_name"]
+ }
+ if actual["cluster_id"] == nil {
+ delete(expected, "cluster_id")
+ } else {
+ expected["cluster_id"] = actual["cluster_id"]
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+ }
+}
+
+func TestSysHealth_head(t *testing.T) {
+ core, _, _ := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ testData := []struct {
+ uri string
+ code int
+ }{
+ {"", 200},
+ {"?activecode=503", 503},
+ {"?activecode=notacode", 400},
+ }
+
+ for _, tt := range testData {
+ queryurl, err := url.Parse(addr + "/v1/sys/health" + tt.uri)
+ if err != nil {
+ t.Fatalf("err on %v: %s", queryurl, err)
+ }
+ resp, err := http.Head(queryurl.String())
+ if err != nil {
+ t.Fatalf("err on %v: %s", queryurl, err)
+ }
+
+ if resp.StatusCode != tt.code {
+ t.Fatalf("HEAD %v expected code %d, got %d.", queryurl, tt.code, resp.StatusCode)
+ }
+
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatalf("err on %v: %s", queryurl, err)
+ }
+ if len(data) > 0 {
+ t.Fatalf("HEAD %v expected no body, received %q.", queryurl, data)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_init.go b/vendor/github.com/hashicorp/vault/http/sys_init.go
new file mode 100644
index 0000000..c29b040
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_init.go
@@ -0,0 +1,162 @@
+package http
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "net/http"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/vault"
+)
+
+func handleSysInit(core *vault.Core) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.Method {
+ case "GET":
+ handleSysInitGet(core, w, r)
+ case "PUT", "POST":
+ handleSysInitPut(core, w, r)
+ default:
+ respondError(w, http.StatusMethodNotAllowed, nil)
+ }
+ })
+}
+
+func handleSysInitGet(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+ init, err := core.Initialized()
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+
+ respondOk(w, &InitStatusResponse{
+ Initialized: init,
+ })
+}
+
+func handleSysInitPut(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+ // Parse the request
+ var req InitRequest
+ if err := parseRequest(r, w, &req); err != nil {
+ respondError(w, http.StatusBadRequest, err)
+ return
+ }
+
+ // Initialize
+ barrierConfig := &vault.SealConfig{
+ SecretShares: req.SecretShares,
+ SecretThreshold: req.SecretThreshold,
+ StoredShares: req.StoredShares,
+ PGPKeys: req.PGPKeys,
+ }
+
+ recoveryConfig := &vault.SealConfig{
+ SecretShares: req.RecoveryShares,
+ SecretThreshold: req.RecoveryThreshold,
+ PGPKeys: req.RecoveryPGPKeys,
+ }
+
+ if core.SealAccess().StoredKeysSupported() {
+ if barrierConfig.SecretShares != 1 {
+ respondError(w, http.StatusBadRequest, fmt.Errorf("secret shares must be 1"))
+ return
+ }
+ if barrierConfig.SecretThreshold != barrierConfig.SecretShares {
+ respondError(w, http.StatusBadRequest, fmt.Errorf("secret threshold must be same as secret shares"))
+ return
+ }
+ if barrierConfig.StoredShares != barrierConfig.SecretShares {
+ respondError(w, http.StatusBadRequest, fmt.Errorf("stored shares must be same as secret shares"))
+ return
+ }
+ if len(barrierConfig.PGPKeys) > 0 {
+ respondError(w, http.StatusBadRequest, fmt.Errorf("PGP keys not supported when storing shares"))
+ return
+ }
+ } else {
+ if barrierConfig.StoredShares > 0 {
+ respondError(w, http.StatusBadRequest, fmt.Errorf("stored keys are not supported"))
+ return
+ }
+ }
+
+ if len(barrierConfig.PGPKeys) > 0 && len(barrierConfig.PGPKeys) != barrierConfig.SecretShares-barrierConfig.StoredShares {
+ respondError(w, http.StatusBadRequest, fmt.Errorf("incorrect number of PGP keys"))
+ return
+ }
+
+ if core.SealAccess().RecoveryKeySupported() {
+ if len(recoveryConfig.PGPKeys) > 0 && len(recoveryConfig.PGPKeys) != recoveryConfig.SecretShares-recoveryConfig.StoredShares {
+ respondError(w, http.StatusBadRequest, fmt.Errorf("incorrect number of PGP keys for recovery"))
+ return
+ }
+ }
+
+ initParams := &vault.InitParams{
+ BarrierConfig: barrierConfig,
+ RecoveryConfig: recoveryConfig,
+ RootTokenPGPKey: req.RootTokenPGPKey,
+ }
+
+ result, initErr := core.Initialize(initParams)
+ if initErr != nil {
+ if !errwrap.ContainsType(initErr, new(vault.NonFatalError)) {
+ respondError(w, http.StatusBadRequest, initErr)
+ return
+ } else {
+ // Non-fatal error: it is already logged by the server, so continue
+ // with the response. (A warnings field could be added here.)
+ }
+ }
+
+ // Encode the keys
+ keys := make([]string, 0, len(result.SecretShares))
+ keysB64 := make([]string, 0, len(result.SecretShares))
+ for _, k := range result.SecretShares {
+ keys = append(keys, hex.EncodeToString(k))
+ keysB64 = append(keysB64, base64.StdEncoding.EncodeToString(k))
+ }
+
+ resp := &InitResponse{
+ Keys: keys,
+ KeysB64: keysB64,
+ RootToken: result.RootToken,
+ }
+
+ if len(result.RecoveryShares) > 0 {
+ resp.RecoveryKeys = make([]string, 0, len(result.RecoveryShares))
+ resp.RecoveryKeysB64 = make([]string, 0, len(result.RecoveryShares))
+ for _, k := range result.RecoveryShares {
+ resp.RecoveryKeys = append(resp.RecoveryKeys, hex.EncodeToString(k))
+ resp.RecoveryKeysB64 = append(resp.RecoveryKeysB64, base64.StdEncoding.EncodeToString(k))
+ }
+ }
+
+ core.UnsealWithStoredKeys()
+
+ respondOk(w, resp)
+}
+
+type InitRequest struct {
+ SecretShares int `json:"secret_shares"`
+ SecretThreshold int `json:"secret_threshold"`
+ StoredShares int `json:"stored_shares"`
+ PGPKeys []string `json:"pgp_keys"`
+ RecoveryShares int `json:"recovery_shares"`
+ RecoveryThreshold int `json:"recovery_threshold"`
+ RecoveryPGPKeys []string `json:"recovery_pgp_keys"`
+ RootTokenPGPKey string `json:"root_token_pgp_key"`
+}
+
+type InitResponse struct {
+ Keys []string `json:"keys"`
+ KeysB64 []string `json:"keys_base64"`
+ RecoveryKeys []string `json:"recovery_keys,omitempty"`
+ RecoveryKeysB64 []string `json:"recovery_keys_base64,omitempty"`
+ RootToken string `json:"root_token"`
+}
+
+type InitStatusResponse struct {
+ Initialized bool `json:"initialized"`
+}
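+
+// Illustrative request (a sketch, not upstream documentation): with a plain
+// shamir seal, initialization only needs the share counts:
+//
+// PUT /v1/sys/init {"secret_shares": 5, "secret_threshold": 3}
+//
+// The response carries every key share both hex- and base64-encoded along
+// with the initial root token, as exercised by TestSysInit_put below.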
diff --git a/vendor/github.com/hashicorp/vault/http/sys_init_test.go b/vendor/github.com/hashicorp/vault/http/sys_init_test.go
new file mode 100644
index 0000000..9dfa776
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_init_test.go
@@ -0,0 +1,129 @@
+package http
+
+import (
+ "encoding/hex"
+ "net/http"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysInit_get(t *testing.T) {
+ core := vault.TestCore(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ {
+ // Pre-init
+ resp, err := http.Get(addr + "/v1/sys/init")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "initialized": false,
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+ }
+
+ vault.TestCoreInit(t, core)
+
+ {
+ // Post-init
+ resp, err := http.Get(addr + "/v1/sys/init")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "initialized": true,
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+ }
+}
+
+// Test that the API errors out when the wrong number of PGP keys is supplied
+func TestSysInit_pgpKeysEntries(t *testing.T) {
+ core := vault.TestCore(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{
+ "secret_shares": 5,
+ "secret_threshold": 3,
+ "pgp_keys": []string{"pgpkey1"},
+ })
+ testResponseStatus(t, resp, 400)
+}
+
+// Test that the API errors out when the wrong number of PGP keys is supplied
+// for the recovery config
+func TestSysInit_pgpKeysEntriesForRecovery(t *testing.T) {
+ core := vault.TestCoreNewSeal(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{
+ "secret_shares": 1,
+ "secret_threshold": 1,
+ "stored_shares": 1,
+ "recovery_shares": 5,
+ "recovery_threshold": 3,
+ "recovery_pgp_keys": []string{"pgpkey1"},
+ })
+ testResponseStatus(t, resp, 400)
+}
+
+func TestSysInit_put(t *testing.T) {
+ core := vault.TestCore(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{
+ "secret_shares": 5,
+ "secret_threshold": 3,
+ })
+
+ var actual map[string]interface{}
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ keysRaw, ok := actual["keys"]
+ if !ok {
+ t.Fatalf("no keys: %#v", actual)
+ }
+
+ if _, ok := actual["root_token"]; !ok {
+ t.Fatal("no root token")
+ }
+
+ for _, key := range keysRaw.([]interface{}) {
+ keySlice, err := hex.DecodeString(key.(string))
+ if err != nil {
+ t.Fatalf("bad: %s", err)
+ }
+
+ if _, err := core.Unseal(keySlice); err != nil {
+ t.Fatalf("bad: %s", err)
+ }
+ }
+
+ seal, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if seal {
+ t.Fatal("should not be sealed")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_leader.go b/vendor/github.com/hashicorp/vault/http/sys_leader.go
new file mode 100644
index 0000000..ad5f281
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_leader.go
@@ -0,0 +1,44 @@
+package http
+
+import (
+ "net/http"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/vault"
+)
+
+func handleSysLeader(core *vault.Core) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.Method {
+ case "GET":
+ handleSysLeaderGet(core, w, r)
+ default:
+ respondError(w, http.StatusMethodNotAllowed, nil)
+ }
+ })
+}
+
+func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+ haEnabled := true
+ isLeader, address, err := core.Leader()
+ if errwrap.Contains(err, vault.ErrHANotEnabled.Error()) {
+ haEnabled = false
+ err = nil
+ }
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+
+ respondOk(w, &LeaderResponse{
+ HAEnabled: haEnabled,
+ IsSelf: isLeader,
+ LeaderAddress: address,
+ })
+}
+
+type LeaderResponse struct {
+ HAEnabled bool `json:"ha_enabled"`
+ IsSelf bool `json:"is_self"`
+ LeaderAddress string `json:"leader_address"`
+}
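+
+// Illustrative response (a sketch, not upstream documentation): on a node
+// without HA configured the endpoint returns
+//
+// {"ha_enabled": false, "is_self": false, "leader_address": ""}
+//
+// which is exactly what TestSysLeader_get asserts.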
diff --git a/vendor/github.com/hashicorp/vault/http/sys_leader_test.go b/vendor/github.com/hashicorp/vault/http/sys_leader_test.go
new file mode 100644
index 0000000..9c0c7d2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_leader_test.go
@@ -0,0 +1,32 @@
+package http
+
+import (
+ "net/http"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysLeader_get(t *testing.T) {
+ core, _, _ := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ resp, err := http.Get(addr + "/v1/sys/leader")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "ha_enabled": false,
+ "is_self": false,
+ "leader_address": "",
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_lease_test.go b/vendor/github.com/hashicorp/vault/http/sys_lease_test.go
new file mode 100644
index 0000000..6b7bc34
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_lease_test.go
@@ -0,0 +1,54 @@
+package http
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysRenew(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ // write secret
+ resp := testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{
+ "data": "bar",
+ "lease": "1h",
+ })
+ testResponseStatus(t, resp, 204)
+
+ // read secret
+ resp = testHttpGet(t, token, addr+"/v1/secret/foo")
+ var result struct {
+ LeaseID string `json:"lease_id"`
+ }
+ if err := jsonutil.DecodeJSONFromReader(resp.Body, &result); err != nil {
+ t.Fatalf("bad: %s", err)
+ }
+
+ resp = testHttpPut(t, token, addr+"/v1/sys/renew/"+result.LeaseID, nil)
+ testResponseStatus(t, resp, 200)
+}
+
+func TestSysRevoke(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/revoke/secret/foo/1234", nil)
+ testResponseStatus(t, resp, 204)
+}
+
+func TestSysRevokePrefix(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/revoke-prefix/secret/foo/1234", nil)
+ testResponseStatus(t, resp, 204)
+}
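+
+// Illustrative note (a sketch, not upstream documentation): renewal targets
+// the lease_id returned when a secret was read,
+//
+// PUT /v1/sys/renew/<lease_id>
+//
+// while revoke-prefix revokes every lease under a path prefix in one call,
+// as the tests above exercise.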
diff --git a/vendor/github.com/hashicorp/vault/http/sys_mount_test.go b/vendor/github.com/hashicorp/vault/http/sys_mount_test.go
new file mode 100644
index 0000000..2e12f0f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_mount_test.go
@@ -0,0 +1,794 @@
+package http
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysMounts(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpGet(t, token, addr+"/v1/sys/mounts")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ },
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
+
+func TestSysMount(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
+ "type": "generic",
+ "description": "foo",
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/mounts")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "foo/": map[string]interface{}{
+ "description": "foo",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ },
+ "foo/": map[string]interface{}{
+ "description": "foo",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
+
+func TestSysMount_put(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
+ "type": "generic",
+ "description": "foo",
+ })
+ testResponseStatus(t, resp, 204)
+
+ // TestSysMount above verifies that the mount is actually created; see
+ // that test for details.
+}
+
+func TestSysRemount(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
+ "type": "generic",
+ "description": "foo",
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{
+ "from": "foo",
+ "to": "bar",
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/mounts")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "bar/": map[string]interface{}{
+ "description": "foo",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ },
+ "bar/": map[string]interface{}{
+ "description": "foo",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
+
+func TestSysUnmount(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
+ "type": "generic",
+ "description": "foo",
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpDelete(t, token, addr+"/v1/sys/mounts/foo")
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/mounts")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ },
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
+
+func TestSysTuneMount(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
+ "type": "generic",
+ "description": "foo",
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/mounts")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "foo/": map[string]interface{}{
+ "description": "foo",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ },
+ "foo/": map[string]interface{}{
+ "description": "foo",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+
+ // Shorter than system default
+ resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
+ "default_lease_ttl": "72h",
+ })
+ testResponseStatus(t, resp, 204)
+
+ // Longer than system max
+ resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
+ "default_lease_ttl": "72000h",
+ })
+ testResponseStatus(t, resp, 400)
+
+ // Longer than system default
+ resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
+ "max_lease_ttl": "72000h",
+ })
+ testResponseStatus(t, resp, 204)
+
+ // Longer than backend max
+ resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
+ "default_lease_ttl": "72001h",
+ })
+ testResponseStatus(t, resp, 400)
+
+ // Shorter than backend default
+ resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
+ "max_lease_ttl": "1h",
+ })
+ testResponseStatus(t, resp, 400)
+
+ // Shorter than backend max, longer than system max
+ resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
+ "default_lease_ttl": "71999h",
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/mounts")
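+	// 71999h and 72000h are 259196400 and 259200000 seconds respectively,
+	// which is what the tuned mount config should now report.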
+ expected = map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "foo/": map[string]interface{}{
+ "description": "foo",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("259196400"),
+ "max_lease_ttl": json.Number("259200000"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ },
+ "foo/": map[string]interface{}{
+ "description": "foo",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("259196400"),
+ "max_lease_ttl": json.Number("259200000"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "secret/": map[string]interface{}{
+ "description": "generic secret storage",
+ "type": "generic",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "description": "system endpoints used for control, policy and debugging",
+ "type": "system",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": json.Number("0"),
+ "max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ }
+
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("bad:\nExpected: %#v\nActual: %#v", expected, actual)
+ }
+
+ // Check simple configuration endpoint
+ resp = testHttpGet(t, token, addr+"/v1/sys/mounts/foo/tune")
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "default_lease_ttl": json.Number("259196400"),
+ "max_lease_ttl": json.Number("259200000"),
+ "force_no_cache": false,
+ },
+ "default_lease_ttl": json.Number("259196400"),
+ "max_lease_ttl": json.Number("259200000"),
+ "force_no_cache": false,
+ }
+
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("bad:\nExpected: %#v\nActual: %#v", expected, actual)
+ }
+
+ // Set a low max
+ resp = testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{
+ "default_lease_ttl": "40s",
+ "max_lease_ttl": "80s",
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/mounts/secret/tune")
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "default_lease_ttl": json.Number("40"),
+ "max_lease_ttl": json.Number("80"),
+ "force_no_cache": false,
+ },
+ "default_lease_ttl": json.Number("40"),
+ "max_lease_ttl": json.Number("80"),
+ "force_no_cache": false,
+ }
+
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("bad:\nExpected: %#v\nActual: %#v", expected, actual)
+ }
+
+ // First try with lease above backend max
+ resp = testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{
+ "data": "bar",
+ "ttl": "28347h",
+ })
+ testResponseStatus(t, resp, 204)
+
+ // read secret
+ resp = testHttpGet(t, token, addr+"/v1/secret/foo")
+ var result struct {
+ LeaseID string `json:"lease_id" structs:"lease_id"`
+ LeaseDuration int `json:"lease_duration" structs:"lease_duration"`
+ }
+
+ testResponseBody(t, resp, &result)
+
+ expected = map[string]interface{}{
+ "lease_duration": int(80),
+ "lease_id": result.LeaseID,
+ }
+
+ if !reflect.DeepEqual(structs.Map(result), expected) {
+		t.Fatalf("bad:\nExpected: %#v\nActual: %#v", expected, structs.Map(result))
+ }
+
+ // Now with lease TTL unspecified
+ resp = testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{
+ "data": "bar",
+ })
+ testResponseStatus(t, resp, 204)
+
+ // read secret
+ resp = testHttpGet(t, token, addr+"/v1/secret/foo")
+
+ testResponseBody(t, resp, &result)
+
+ expected = map[string]interface{}{
+ "lease_duration": int(40),
+ "lease_id": result.LeaseID,
+ }
+
+ if !reflect.DeepEqual(structs.Map(result), expected) {
+		t.Fatalf("bad:\nExpected: %#v\nActual: %#v", expected, structs.Map(result))
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go b/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go
new file mode 100644
index 0000000..5dc0bf9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go
@@ -0,0 +1,65 @@
+package http
+
+import (
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysMountConfig(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ config := api.DefaultConfig()
+ config.Address = addr
+
+ client, err := api.NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client.SetToken(token)
+
+ // Set up a test mount
+ path, err := testMount(client)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer client.Sys().Unmount(path)
+
+ // Get config info for this mount
+ mountConfig, err := client.Sys().MountConfig(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
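+	// 2764800 seconds is 768h (32 days); the test assumes this is the
+	// system-wide default and max lease TTL of a fresh test core.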
+ expectedDefaultTTL := 2764800
+ if mountConfig.DefaultLeaseTTL != expectedDefaultTTL {
+ t.Fatalf("Expected default lease TTL: %d, got %d",
+ expectedDefaultTTL, mountConfig.DefaultLeaseTTL)
+ }
+
+ expectedMaxTTL := 2764800
+ if mountConfig.MaxLeaseTTL != expectedMaxTTL {
+		t.Fatalf("Expected max lease TTL: %d, got %d",
+ expectedMaxTTL, mountConfig.MaxLeaseTTL)
+ }
+
+	if mountConfig.ForceNoCache {
+		t.Fatalf("did not expect force_no_cache to be set")
+	}
+}
+
+// testMount mounts a generic backend at a random path; the caller is
+// responsible for unmounting it.
+func testMount(client *api.Client) (string, error) {
+	randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+ path := fmt.Sprintf("testmount-%d", randInt)
+ err := client.Sys().Mount(path, &api.MountInput{Type: "generic"})
+ return path, err
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_policy_test.go b/vendor/github.com/hashicorp/vault/http/sys_policy_test.go
new file mode 100644
index 0000000..6a8a33b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_policy_test.go
@@ -0,0 +1,156 @@
+package http
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysPolicies(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpGet(t, token, addr+"/v1/sys/policy")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "policies": []interface{}{"default", "root"},
+ "keys": []interface{}{"default", "root"},
+ },
+ "policies": []interface{}{"default", "root"},
+ "keys": []interface{}{"default", "root"},
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
+ }
+}
+
+func TestSysReadPolicy(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpGet(t, token, addr+"/v1/sys/policy/root")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "name": "root",
+ "rules": "",
+ },
+ "name": "root",
+ "rules": "",
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
+ }
+}
+
+func TestSysWritePolicy(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/policy/foo", map[string]interface{}{
+ "rules": ``,
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/policy")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "policies": []interface{}{"default", "foo", "root"},
+ "keys": []interface{}{"default", "foo", "root"},
+ },
+ "policies": []interface{}{"default", "foo", "root"},
+ "keys": []interface{}{"default", "foo", "root"},
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
+ }
+
+ resp = testHttpPost(t, token, addr+"/v1/sys/policy/response-wrapping", map[string]interface{}{
+ "rules": ``,
+ })
+ testResponseStatus(t, resp, 400)
+}
+
+func TestSysDeletePolicy(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/policy/foo", map[string]interface{}{
+ "rules": ``,
+ })
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpDelete(t, token, addr+"/v1/sys/policy/foo")
+ testResponseStatus(t, resp, 204)
+
+	// Also attempt to delete these, which should not be allowed (responses
+	// are ignored; verifying they still exist below is sufficient)
+ resp = testHttpDelete(t, token, addr+"/v1/sys/policy/default")
+ resp = testHttpDelete(t, token, addr+"/v1/sys/policy/response-wrapping")
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/policy")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "policies": []interface{}{"default", "root"},
+ "keys": []interface{}{"default", "root"},
+ },
+ "policies": []interface{}{"default", "root"},
+ "keys": []interface{}{"default", "root"},
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_rekey.go b/vendor/github.com/hashicorp/vault/http/sys_rekey.go
new file mode 100644
index 0000000..bd597b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_rekey.go
@@ -0,0 +1,253 @@
+package http
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/pgpkeys"
+ "github.com/hashicorp/vault/vault"
+)
+
+func handleSysRekeyInit(core *vault.Core, recovery bool) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ standby, _ := core.Standby()
+ if standby {
+ respondStandby(core, w, r.URL)
+ return
+ }
+
+ repState := core.ReplicationState()
+ if repState == consts.ReplicationSecondary {
+ respondError(w, http.StatusBadRequest,
+ fmt.Errorf("rekeying can only be performed on the primary cluster when replication is activated"))
+ return
+ }
+
+ switch {
+ case recovery && !core.SealAccess().RecoveryKeySupported():
+ respondError(w, http.StatusBadRequest, fmt.Errorf("recovery rekeying not supported"))
+ case r.Method == "GET":
+ handleSysRekeyInitGet(core, recovery, w, r)
+ case r.Method == "POST" || r.Method == "PUT":
+ handleSysRekeyInitPut(core, recovery, w, r)
+ case r.Method == "DELETE":
+ handleSysRekeyInitDelete(core, recovery, w, r)
+ default:
+ respondError(w, http.StatusMethodNotAllowed, nil)
+ }
+ })
+}
+
+func handleSysRekeyInitGet(core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) {
+ barrierConfig, err := core.SealAccess().BarrierConfig()
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+ if barrierConfig == nil {
+ respondError(w, http.StatusBadRequest, fmt.Errorf(
+ "server is not yet initialized"))
+ return
+ }
+
+ // Get the rekey configuration
+ rekeyConf, err := core.RekeyConfig(recovery)
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+
+ // Get the progress
+ progress, err := core.RekeyProgress(recovery)
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+
+ sealThreshold, err := core.RekeyThreshold(recovery)
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+
+ // Format the status
+ status := &RekeyStatusResponse{
+ Started: false,
+ T: 0,
+ N: 0,
+ Progress: progress,
+ Required: sealThreshold,
+ }
+ if rekeyConf != nil {
+ status.Nonce = rekeyConf.Nonce
+ status.Started = true
+ status.T = rekeyConf.SecretThreshold
+ status.N = rekeyConf.SecretShares
+ if rekeyConf.PGPKeys != nil && len(rekeyConf.PGPKeys) != 0 {
+ pgpFingerprints, err := pgpkeys.GetFingerprints(rekeyConf.PGPKeys, nil)
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+ status.PGPFingerprints = pgpFingerprints
+ status.Backup = rekeyConf.Backup
+ }
+ }
+ respondOk(w, status)
+}
+
+func handleSysRekeyInitPut(core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) {
+ // Parse the request
+ var req RekeyRequest
+ if err := parseRequest(r, w, &req); err != nil {
+ respondError(w, http.StatusBadRequest, err)
+ return
+ }
+
+ if req.Backup && len(req.PGPKeys) == 0 {
+ respondError(w, http.StatusBadRequest, fmt.Errorf("cannot request a backup of the new keys without providing PGP keys for encryption"))
+ return
+ }
+
+	// Right now we don't support rekeying the barrier when stored key
+	// support is available, but the rest of the code is ready for when we
+	// do, hence the explicit check below.
+ if core.SealAccess().StoredKeysSupported() && !recovery {
+ respondError(w, http.StatusBadRequest, fmt.Errorf("rekeying of barrier not supported when stored key support is available"))
+ return
+ }
+
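+	// One PGP key is required per returned share; e.g. secret_shares of 5
+	// with stored_shares of 0 needs exactly five PGP keys.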
+ if len(req.PGPKeys) > 0 && len(req.PGPKeys) != req.SecretShares-req.StoredShares {
+ respondError(w, http.StatusBadRequest, fmt.Errorf("incorrect number of PGP keys for rekey"))
+ return
+ }
+
+ // Initialize the rekey
+ err := core.RekeyInit(&vault.SealConfig{
+ SecretShares: req.SecretShares,
+ SecretThreshold: req.SecretThreshold,
+ StoredShares: req.StoredShares,
+ PGPKeys: req.PGPKeys,
+ Backup: req.Backup,
+ }, recovery)
+ if err != nil {
+ respondError(w, http.StatusBadRequest, err)
+ return
+ }
+
+ handleSysRekeyInitGet(core, recovery, w, r)
+}
+
+func handleSysRekeyInitDelete(core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) {
+ err := core.RekeyCancel(recovery)
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+ respondOk(w, nil)
+}
+
+func handleSysRekeyUpdate(core *vault.Core, recovery bool) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ standby, _ := core.Standby()
+ if standby {
+ respondStandby(core, w, r.URL)
+ return
+ }
+
+ // Parse the request
+ var req RekeyUpdateRequest
+ if err := parseRequest(r, w, &req); err != nil {
+ respondError(w, http.StatusBadRequest, err)
+ return
+ }
+ if req.Key == "" {
+ respondError(
+ w, http.StatusBadRequest,
+ errors.New("'key' must be specified in request body as JSON"))
+ return
+ }
+
+ // Decode the key, which is base64 or hex encoded
+ min, max := core.BarrierKeyLength()
+ key, err := hex.DecodeString(req.Key)
+ // We check min and max here to ensure that a string that is base64
+ // encoded but also valid hex will not be valid and we instead base64
+ // decode it
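+	// (For example, "0123" is valid hex but decodes to only two bytes, below
+	// the barrier key minimum, so it falls through to the base64 branch;
+	// core.RekeyUpdate is left to reject it if the result is still not a
+	// valid key share.)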
+ if err != nil || len(key) < min || len(key) > max {
+ key, err = base64.StdEncoding.DecodeString(req.Key)
+ if err != nil {
+ respondError(
+ w, http.StatusBadRequest,
+ errors.New("'key' must be a valid hex or base64 string"))
+ return
+ }
+ }
+
+ // Use the key to make progress on rekey
+ result, err := core.RekeyUpdate(key, req.Nonce, recovery)
+ if err != nil {
+ respondError(w, http.StatusBadRequest, err)
+ return
+ }
+
+ // Format the response
+ resp := &RekeyUpdateResponse{}
+ if result != nil {
+ resp.Complete = true
+ resp.Nonce = req.Nonce
+ resp.Backup = result.Backup
+ resp.PGPFingerprints = result.PGPFingerprints
+
+ // Encode the keys
+ keys := make([]string, 0, len(result.SecretShares))
+ keysB64 := make([]string, 0, len(result.SecretShares))
+ for _, k := range result.SecretShares {
+ keys = append(keys, hex.EncodeToString(k))
+ keysB64 = append(keysB64, base64.StdEncoding.EncodeToString(k))
+ }
+ resp.Keys = keys
+ resp.KeysB64 = keysB64
+ respondOk(w, resp)
+ } else {
+ handleSysRekeyInitGet(core, recovery, w, r)
+ }
+ })
+}
+
+type RekeyRequest struct {
+ SecretShares int `json:"secret_shares"`
+ SecretThreshold int `json:"secret_threshold"`
+ StoredShares int `json:"stored_shares"`
+ PGPKeys []string `json:"pgp_keys"`
+ Backup bool `json:"backup"`
+}
+
+type RekeyStatusResponse struct {
+ Nonce string `json:"nonce"`
+ Started bool `json:"started"`
+ T int `json:"t"`
+ N int `json:"n"`
+ Progress int `json:"progress"`
+ Required int `json:"required"`
+ PGPFingerprints []string `json:"pgp_fingerprints"`
+ Backup bool `json:"backup"`
+}
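+
+// For illustration only, a rekey status response marshals along these lines
+// (nonce value hypothetical):
+//
+//	{
+//	  "nonce": "5f7a...",
+//	  "started": true,
+//	  "t": 3,
+//	  "n": 5,
+//	  "progress": 1,
+//	  "required": 3,
+//	  "pgp_fingerprints": null,
+//	  "backup": false
+//	}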
+
+type RekeyUpdateRequest struct {
+ Nonce string
+ Key string
+}
+
+type RekeyUpdateResponse struct {
+ Nonce string `json:"nonce"`
+ Complete bool `json:"complete"`
+ Keys []string `json:"keys"`
+ KeysB64 []string `json:"keys_base64"`
+ PGPFingerprints []string `json:"pgp_fingerprints"`
+ Backup bool `json:"backup"`
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_rekey_test.go b/vendor/github.com/hashicorp/vault/http/sys_rekey_test.go
new file mode 100644
index 0000000..b4a49ad
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_rekey_test.go
@@ -0,0 +1,260 @@
+package http
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+// Test to check if the API errors out when wrong number of PGP keys are
+// supplied for rekey
+func TestSysRekeyInit_pgpKeysEntriesForRekey(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
+ "secret_shares": 5,
+ "secret_threshold": 3,
+ "pgp_keys": []string{"pgpkey1"},
+ })
+ testResponseStatus(t, resp, 400)
+}
+
+func TestSysRekeyInit_Status(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp, err := http.Get(addr + "/v1/sys/rekey/init")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "started": false,
+ "t": json.Number("0"),
+ "n": json.Number("0"),
+ "progress": json.Number("0"),
+ "required": json.Number("3"),
+ "pgp_fingerprints": interface{}(nil),
+ "backup": false,
+ "nonce": "",
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+}
+
+func TestSysRekeyInit_Setup(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
+ "secret_shares": 5,
+ "secret_threshold": 3,
+ })
+ testResponseStatus(t, resp, 200)
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "started": true,
+ "t": json.Number("3"),
+ "n": json.Number("5"),
+ "progress": json.Number("0"),
+ "required": json.Number("3"),
+ "pgp_fingerprints": interface{}(nil),
+ "backup": false,
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if actual["nonce"].(string) == "" {
+ t.Fatalf("nonce was empty")
+ }
+ expected["nonce"] = actual["nonce"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/rekey/init")
+
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "started": true,
+ "t": json.Number("3"),
+ "n": json.Number("5"),
+ "progress": json.Number("0"),
+ "required": json.Number("3"),
+ "pgp_fingerprints": interface{}(nil),
+ "backup": false,
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if actual["nonce"].(string) == "" {
+ t.Fatalf("nonce was empty")
+ }
+ expected["nonce"] = actual["nonce"]
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+}
+
+func TestSysRekeyInit_Cancel(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
+ "secret_shares": 5,
+ "secret_threshold": 3,
+ })
+ testResponseStatus(t, resp, 200)
+
+ resp = testHttpDelete(t, token, addr+"/v1/sys/rekey/init")
+ testResponseStatus(t, resp, 204)
+
+ resp, err := http.Get(addr + "/v1/sys/rekey/init")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "started": false,
+ "t": json.Number("0"),
+ "n": json.Number("0"),
+ "progress": json.Number("0"),
+ "required": json.Number("3"),
+ "pgp_fingerprints": interface{}(nil),
+ "backup": false,
+ "nonce": "",
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+}
+
+func TestSysRekey_badKey(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/rekey/update", map[string]interface{}{
+ "key": "0123",
+ })
+ testResponseStatus(t, resp, 400)
+}
+
+func TestSysRekey_Update(t *testing.T) {
+ core, keys, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
+ "secret_shares": 5,
+ "secret_threshold": 3,
+ })
+ var rekeyStatus map[string]interface{}
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &rekeyStatus)
+
+ var actual map[string]interface{}
+ var expected map[string]interface{}
+
+ for i, key := range keys {
+ resp = testHttpPut(t, token, addr+"/v1/sys/rekey/update", map[string]interface{}{
+ "nonce": rekeyStatus["nonce"].(string),
+ "key": hex.EncodeToString(key),
+ })
+
+ actual = map[string]interface{}{}
+ expected = map[string]interface{}{
+ "started": true,
+ "nonce": rekeyStatus["nonce"].(string),
+ "backup": false,
+ "pgp_fingerprints": interface{}(nil),
+ "required": json.Number("3"),
+ "t": json.Number("3"),
+ "n": json.Number("5"),
+ "progress": json.Number(fmt.Sprintf("%d", i+1)),
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+
+ if i+1 == len(keys) {
+ delete(expected, "started")
+ delete(expected, "required")
+ delete(expected, "t")
+ delete(expected, "n")
+ delete(expected, "progress")
+ expected["complete"] = true
+ expected["keys"] = actual["keys"]
+ expected["keys_base64"] = actual["keys_base64"]
+ }
+
+ if i+1 < len(keys) && (actual["nonce"] == nil || actual["nonce"].(string) == "") {
+ t.Fatalf("expected a nonce, i is %d, actual is %#v", i, actual)
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected: \n%#v\nactual: \n%#v", expected, actual)
+ }
+ }
+
+ retKeys := actual["keys"].([]interface{})
+ if len(retKeys) != 5 {
+ t.Fatalf("bad: %#v", retKeys)
+ }
+ keysB64 := actual["keys_base64"].([]interface{})
+ if len(keysB64) != 5 {
+ t.Fatalf("bad: %#v", keysB64)
+ }
+}
+
+func TestSysRekey_ReInitUpdate(t *testing.T) {
+ core, keys, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
+ "secret_shares": 5,
+ "secret_threshold": 3,
+ })
+ testResponseStatus(t, resp, 200)
+
+ resp = testHttpDelete(t, token, addr+"/v1/sys/rekey/init")
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
+ "secret_shares": 5,
+ "secret_threshold": 3,
+ })
+ testResponseStatus(t, resp, 200)
+
+ resp = testHttpPut(t, token, addr+"/v1/sys/rekey/update", map[string]interface{}{
+ "key": hex.EncodeToString(keys[0]),
+ })
+
+ testResponseStatus(t, resp, 400)
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_rotate_test.go b/vendor/github.com/hashicorp/vault/http/sys_rotate_test.go
new file mode 100644
index 0000000..cbb8a38
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_rotate_test.go
@@ -0,0 +1,51 @@
+package http
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysRotate(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPost(t, token, addr+"/v1/sys/rotate", map[string]interface{}{})
+ testResponseStatus(t, resp, 204)
+
+ resp = testHttpGet(t, token, addr+"/v1/sys/key-status")
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "term": json.Number("2"),
+ },
+ "term": json.Number("2"),
+ }
+
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+
+ actualInstallTime, ok := actual["data"].(map[string]interface{})["install_time"]
+ if !ok || actualInstallTime == "" {
+ t.Fatal("install_time missing in data")
+ }
+ expected["data"].(map[string]interface{})["install_time"] = actualInstallTime
+ expected["install_time"] = actualInstallTime
+
+ expected["request_id"] = actual["request_id"]
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad:\nexpected: %#v\nactual: %#v", expected, actual)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_seal.go b/vendor/github.com/hashicorp/vault/http/sys_seal.go
new file mode 100644
index 0000000..ef24304
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_seal.go
@@ -0,0 +1,218 @@
+package http
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/vault"
+ "github.com/hashicorp/vault/version"
+)
+
+func handleSysSeal(core *vault.Core) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ req, statusCode, err := buildLogicalRequest(core, w, r)
+ if err != nil || statusCode != 0 {
+ respondError(w, statusCode, err)
+ return
+ }
+
+ switch req.Operation {
+ case logical.UpdateOperation:
+ default:
+ respondError(w, http.StatusMethodNotAllowed, nil)
+ return
+ }
+
+ // Seal with the token above
+		if err := core.SealWithRequest(req); err != nil {
+			if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
+				respondError(w, http.StatusForbidden, err)
+				return
+			}
+			respondError(w, http.StatusInternalServerError, err)
+			return
+		}
+
+ respondOk(w, nil)
+ })
+}
+
+func handleSysStepDown(core *vault.Core) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ req, statusCode, err := buildLogicalRequest(core, w, r)
+ if err != nil || statusCode != 0 {
+ respondError(w, statusCode, err)
+ return
+ }
+
+ switch req.Operation {
+ case logical.UpdateOperation:
+ default:
+ respondError(w, http.StatusMethodNotAllowed, nil)
+ return
+ }
+
+		// Step down with the token above
+ if err := core.StepDown(req); err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+
+ respondOk(w, nil)
+ })
+}
+
+func handleSysUnseal(core *vault.Core) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case "PUT", "POST":
+		default:
+			respondError(w, http.StatusMethodNotAllowed, nil)
+			return
+		}
+
+ // Parse the request
+ var req UnsealRequest
+ if err := parseRequest(r, w, &req); err != nil {
+ respondError(w, http.StatusBadRequest, err)
+ return
+ }
+ if !req.Reset && req.Key == "" {
+ respondError(
+ w, http.StatusBadRequest,
+ errors.New("'key' must be specified in request body as JSON, or 'reset' set to true"))
+ return
+ }
+
+ if req.Reset {
+ sealed, err := core.Sealed()
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+ if !sealed {
+ respondError(w, http.StatusBadRequest, errors.New("vault is unsealed"))
+ return
+ }
+ core.ResetUnsealProcess()
+ } else {
+ // Decode the key, which is base64 or hex encoded
+ min, max := core.BarrierKeyLength()
+ key, err := hex.DecodeString(req.Key)
+ // We check min and max here to ensure that a string that is base64
+ // encoded but also valid hex will not be valid and we instead base64
+ // decode it
+ if err != nil || len(key) < min || len(key) > max {
+ key, err = base64.StdEncoding.DecodeString(req.Key)
+ if err != nil {
+ respondError(
+ w, http.StatusBadRequest,
+ errors.New("'key' must be a valid hex or base64 string"))
+ return
+ }
+ }
+
+ // Attempt the unseal
+ if _, err := core.Unseal(key); err != nil {
+ switch {
+ case errwrap.ContainsType(err, new(vault.ErrInvalidKey)):
+ case errwrap.Contains(err, vault.ErrBarrierInvalidKey.Error()):
+ case errwrap.Contains(err, vault.ErrBarrierNotInit.Error()):
+ case errwrap.Contains(err, vault.ErrBarrierSealed.Error()):
+ case errwrap.Contains(err, consts.ErrStandby.Error()):
+ default:
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+ respondError(w, http.StatusBadRequest, err)
+ return
+ }
+ }
+
+ // Return the seal status
+ handleSysSealStatusRaw(core, w, r)
+ })
+}
+
+func handleSysSealStatus(core *vault.Core) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "GET" {
+ respondError(w, http.StatusMethodNotAllowed, nil)
+ return
+ }
+
+ handleSysSealStatusRaw(core, w, r)
+ })
+}
+
+func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+ sealed, err := core.Sealed()
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+
+ sealConfig, err := core.SealAccess().BarrierConfig()
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+ if sealConfig == nil {
+ respondError(w, http.StatusBadRequest, fmt.Errorf(
+ "server is not yet initialized"))
+ return
+ }
+
+ // Fetch the local cluster name and identifier
+ var clusterName, clusterID string
+ if !sealed {
+ cluster, err := core.Cluster()
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, err)
+ return
+ }
+ if cluster == nil {
+ respondError(w, http.StatusInternalServerError, fmt.Errorf("failed to fetch cluster details"))
+ return
+ }
+ clusterName = cluster.Name
+ clusterID = cluster.ID
+ }
+
+ progress, nonce := core.SecretProgress()
+
+ respondOk(w, &SealStatusResponse{
+ Sealed: sealed,
+ T: sealConfig.SecretThreshold,
+ N: sealConfig.SecretShares,
+ Progress: progress,
+ Nonce: nonce,
+ Version: version.GetVersion().VersionNumber(),
+ ClusterName: clusterName,
+ ClusterID: clusterID,
+ })
+}
+
+type SealStatusResponse struct {
+ Sealed bool `json:"sealed"`
+ T int `json:"t"`
+ N int `json:"n"`
+ Progress int `json:"progress"`
+ Nonce string `json:"nonce"`
+ Version string `json:"version"`
+ ClusterName string `json:"cluster_name,omitempty"`
+ ClusterID string `json:"cluster_id,omitempty"`
+}
+
+type UnsealRequest struct {
+ Key string
+ Reset bool
+}
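+
+// A client supplies either a key share or a reset flag in the request body,
+// e.g. (key value hypothetical):
+//
+//	{"key": "3a1f9e..."}
+//	{"reset": true}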
diff --git a/vendor/github.com/hashicorp/vault/http/sys_seal_test.go b/vendor/github.com/hashicorp/vault/http/sys_seal_test.go
new file mode 100644
index 0000000..8124614
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_seal_test.go
@@ -0,0 +1,381 @@
+package http
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "strconv"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysSealStatus(t *testing.T) {
+ core := vault.TestCore(t)
+ vault.TestCoreInit(t, core)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ resp, err := http.Get(addr + "/v1/sys/seal-status")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "sealed": true,
+ "t": json.Number("3"),
+ "n": json.Number("3"),
+ "progress": json.Number("0"),
+ "nonce": "",
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if actual["version"] == nil {
+ t.Fatalf("expected version information")
+ }
+ expected["version"] = actual["version"]
+ if actual["cluster_name"] == nil {
+ delete(expected, "cluster_name")
+ } else {
+ expected["cluster_name"] = actual["cluster_name"]
+ }
+ if actual["cluster_id"] == nil {
+ delete(expected, "cluster_id")
+ } else {
+ expected["cluster_id"] = actual["cluster_id"]
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+ }
+}
+
+func TestSysSealStatus_uninit(t *testing.T) {
+ core := vault.TestCore(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ resp, err := http.Get(addr + "/v1/sys/seal-status")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ testResponseStatus(t, resp, 400)
+}
+
+func TestSysSeal(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/seal", nil)
+ testResponseStatus(t, resp, 204)
+
+ check, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if !check {
+ t.Fatal("should be sealed")
+ }
+}
+
+func TestSysSeal_unsealed(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/seal", nil)
+ testResponseStatus(t, resp, 204)
+
+ check, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if !check {
+ t.Fatal("should be sealed")
+ }
+}
+
+func TestSysUnseal(t *testing.T) {
+ core := vault.TestCore(t)
+ keys, _ := vault.TestCoreInit(t, core)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ for i, key := range keys {
+ resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{
+ "key": hex.EncodeToString(key),
+ })
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "sealed": true,
+ "t": json.Number("3"),
+ "n": json.Number("3"),
+ "progress": json.Number(fmt.Sprintf("%d", i+1)),
+ "nonce": "",
+ }
+ if i == len(keys)-1 {
+ expected["sealed"] = false
+ expected["progress"] = json.Number("0")
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if i < len(keys)-1 && (actual["nonce"] == nil || actual["nonce"].(string) == "") {
+ t.Fatalf("got nil nonce, actual is %#v", actual)
+ } else {
+ expected["nonce"] = actual["nonce"]
+ }
+ if actual["version"] == nil {
+ t.Fatalf("expected version information")
+ }
+ expected["version"] = actual["version"]
+ if actual["cluster_name"] == nil {
+ delete(expected, "cluster_name")
+ } else {
+ expected["cluster_name"] = actual["cluster_name"]
+ }
+ if actual["cluster_id"] == nil {
+ delete(expected, "cluster_id")
+ } else {
+ expected["cluster_id"] = actual["cluster_id"]
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected: \n%#v\nactual: \n%#v", expected, actual)
+ }
+ }
+}
+
+func TestSysUnseal_badKey(t *testing.T) {
+ core := vault.TestCore(t)
+ vault.TestCoreInit(t, core)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{
+ "key": "0123",
+ })
+ testResponseStatus(t, resp, 400)
+}
+
+func TestSysUnseal_Reset(t *testing.T) {
+ core := vault.TestCore(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ thresh := 3
+ resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{
+ "secret_shares": 5,
+ "secret_threshold": thresh,
+ })
+
+ var actual map[string]interface{}
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ keysRaw, ok := actual["keys"]
+ if !ok {
+ t.Fatalf("no keys: %#v", actual)
+ }
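+	// Submit only threshold-1 shares so the unseal stays in progress and the
+	// reset below has pending progress to clear.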
+ for i, key := range keysRaw.([]interface{}) {
+ if i > thresh-2 {
+ break
+ }
+
+ resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{
+ "key": key.(string),
+ })
+
+ var actual map[string]interface{}
+ expected := map[string]interface{}{
+ "sealed": true,
+ "t": json.Number("3"),
+ "n": json.Number("5"),
+ "progress": json.Number(strconv.Itoa(i + 1)),
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if actual["version"] == nil {
+ t.Fatalf("expected version information")
+ }
+ expected["version"] = actual["version"]
+ if actual["nonce"] == "" && expected["sealed"].(bool) {
+ t.Fatalf("expected a nonce")
+ }
+ expected["nonce"] = actual["nonce"]
+ if actual["cluster_name"] == nil {
+ delete(expected, "cluster_name")
+ } else {
+ expected["cluster_name"] = actual["cluster_name"]
+ }
+ if actual["cluster_id"] == nil {
+ delete(expected, "cluster_id")
+ } else {
+ expected["cluster_id"] = actual["cluster_id"]
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected:\n%#v\nactual:\n%#v\n", expected, actual)
+ }
+ }
+
+ resp = testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{
+ "reset": true,
+ })
+
+ actual = map[string]interface{}{}
+ expected := map[string]interface{}{
+ "sealed": true,
+ "t": json.Number("3"),
+ "n": json.Number("5"),
+ "progress": json.Number("0"),
+ }
+ testResponseStatus(t, resp, 200)
+ testResponseBody(t, resp, &actual)
+ if actual["version"] == nil {
+ t.Fatalf("expected version information")
+ }
+ expected["version"] = actual["version"]
+ expected["nonce"] = actual["nonce"]
+ if actual["cluster_name"] == nil {
+ delete(expected, "cluster_name")
+ } else {
+ expected["cluster_name"] = actual["cluster_name"]
+ }
+ if actual["cluster_id"] == nil {
+ delete(expected, "cluster_id")
+ } else {
+ expected["cluster_id"] = actual["cluster_id"]
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("\nexpected:\n%#v\nactual:\n%#v\n", expected, actual)
+ }
+
+}
+
+// Test Seal's permissions logic, which is slightly different from normal code
+// paths in that it queries the ACL rather than having checkToken do it. This
+// is because it used to abuse RootPaths in logical_system, which broke code
+// paths that expected an actual corresponding logical.Path to exist. This way
+// is less hacky, and this test ensures that we have not opened up a
+// permissions hole.
+func TestSysSeal_Permissions(t *testing.T) {
+ core, _, root := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, root)
+
+ // Set the 'test' policy object to permit write access to sys/seal
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/policy/test",
+ Data: map[string]interface{}{
+ "rules": `path "sys/seal" { capabilities = ["read"] }`,
+ },
+ ClientToken: root,
+ }
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Create a non-root token with access to that policy
+ req.Path = "auth/token/create"
+ req.Data = map[string]interface{}{
+ "id": "child",
+ "policies": []string{"test"},
+ }
+
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken != "child" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // We must go through the HTTP interface since seal doesn't go through HandleRequest
+
+ // We expect this to fail since it needs update and sudo
+ httpResp := testHttpPut(t, "child", addr+"/v1/sys/seal", nil)
+ testResponseStatus(t, httpResp, 403)
+
+ // Now modify to add update capability
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/policy/test",
+ Data: map[string]interface{}{
+ "rules": `path "sys/seal" { capabilities = ["update"] }`,
+ },
+ ClientToken: root,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // We expect this to fail since it needs sudo
+ httpResp = testHttpPut(t, "child", addr+"/v1/sys/seal", nil)
+ testResponseStatus(t, httpResp, 403)
+
+ // Now modify to just sudo capability
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/policy/test",
+ Data: map[string]interface{}{
+ "rules": `path "sys/seal" { capabilities = ["sudo"] }`,
+ },
+ ClientToken: root,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // We expect this to fail since it needs update
+ httpResp = testHttpPut(t, "child", addr+"/v1/sys/seal", nil)
+ testResponseStatus(t, httpResp, 403)
+
+ // Now modify to add all needed capabilities
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/policy/test",
+ Data: map[string]interface{}{
+ "rules": `path "sys/seal" { capabilities = ["update", "sudo"] }`,
+ },
+ ClientToken: root,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // We expect this to work
+ httpResp = testHttpPut(t, "child", addr+"/v1/sys/seal", nil)
+ testResponseStatus(t, httpResp, 204)
+}
+
+func TestSysStepDown(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ resp := testHttpPut(t, token, addr+"/v1/sys/step-down", nil)
+ testResponseStatus(t, resp, 204)
+}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go b/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go
new file mode 100644
index 0000000..9c27ebb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go
@@ -0,0 +1,335 @@
+package http
+
+import (
+ "encoding/json"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/vault"
+)
+
+// Test wrapping functionality
+func TestHTTP_Wrapping(t *testing.T) {
+ handler1 := http.NewServeMux()
+ handler2 := http.NewServeMux()
+ handler3 := http.NewServeMux()
+
+ coreConfig := &vault.CoreConfig{}
+
+	// Chicken-and-egg: the Handler needs a core, and the test cluster needs
+	// handlers up front, so we create bare muxers first and then route each
+	// one to a core-backed Handler.
+ cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true)
+ for _, core := range cores {
+ defer core.CloseListeners()
+ }
+ handler1.Handle("/", Handler(cores[0].Core))
+ handler2.Handle("/", Handler(cores[1].Core))
+ handler3.Handle("/", Handler(cores[2].Core))
+
+ // make it easy to get access to the active
+ core := cores[0].Core
+ vault.TestWaitActive(t, core)
+
+ root := cores[0].Root
+ client := cores[0].Client
+ client.SetToken(root)
+
+ // Write a value that we will use with wrapping for lookup
+ _, err := client.Logical().Write("secret/foo", map[string]interface{}{
+ "zip": "zap",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Set a wrapping lookup function for reads on that path
+ client.SetWrappingLookupFunc(func(operation, path string) string {
+ if operation == "GET" && path == "secret/foo" {
+ return "5m"
+ }
+
+ return api.DefaultWrappingLookupFunc(operation, path)
+ })
+
+ // First test: basic things that should fail, lookup edition
+ // Root token isn't a wrapping token
+ _, err = client.Logical().Write("sys/wrapping/lookup", nil)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ // Not supplied
+ _, err = client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{
+ "foo": "bar",
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ // Nonexistent token isn't a wrapping token
+ _, err = client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{
+ "token": "bar",
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ // Second: basic things that should fail, unwrap edition
+ // Root token isn't a wrapping token
+ _, err = client.Logical().Unwrap(root)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ // Root token isn't a wrapping token
+ _, err = client.Logical().Write("sys/wrapping/unwrap", nil)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ // Not supplied
+ _, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
+ "foo": "bar",
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ // Nonexistent token isn't a wrapping token
+ _, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
+ "token": "bar",
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ //
+ // Test lookup
+ //
+
+ // Create a wrapping token
+ secret, err := client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret == nil || secret.WrapInfo == nil {
+ t.Fatal("secret or wrap info is nil")
+ }
+ wrapInfo := secret.WrapInfo
+
+ // Test this twice to ensure no ill effect to the wrapping token as a result of the lookup
+ for i := 0; i < 2; i++ {
+		secret, err = client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{
+			"token": wrapInfo.Token,
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+ if secret == nil || secret.Data == nil {
+ t.Fatal("secret or secret data is nil")
+ }
+ creationTTL, _ := secret.Data["creation_ttl"].(json.Number).Int64()
+ if int(creationTTL) != wrapInfo.TTL {
+			t.Fatalf("mismatched TTLs: %d vs %d", creationTTL, wrapInfo.TTL)
+ }
+ if secret.Data["creation_time"].(string) != wrapInfo.CreationTime.Format(time.RFC3339Nano) {
+			t.Fatalf("mismatched creation times: %s vs %s", secret.Data["creation_time"].(string), wrapInfo.CreationTime.Format(time.RFC3339Nano))
+ }
+ }
+
+ //
+ // Test unwrap
+ //
+
+ // Create a wrapping token
+ secret, err = client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret == nil || secret.WrapInfo == nil {
+ t.Fatal("secret or wrap info is nil")
+ }
+ wrapInfo = secret.WrapInfo
+
+ // Test unwrap via the client token
+ client.SetToken(wrapInfo.Token)
+	secret, err = client.Logical().Write("sys/wrapping/unwrap", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+ if secret == nil || secret.Data == nil {
+ t.Fatal("secret or secret data is nil")
+ }
+ ret1 := secret
+ // Should be expired and fail
+ _, err = client.Logical().Write("sys/wrapping/unwrap", nil)
+ if err == nil {
+ t.Fatal("expected err")
+ }
+
+ // Create a wrapping token
+ client.SetToken(root)
+ secret, err = client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret == nil || secret.WrapInfo == nil {
+ t.Fatal("secret or wrap info is nil")
+ }
+ wrapInfo = secret.WrapInfo
+
+ // Test as a separate token
+	secret, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
+		"token": wrapInfo.Token,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+ ret2 := secret
+ // Should be expired and fail
+ _, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
+ "token": wrapInfo.Token,
+ })
+ if err == nil {
+ t.Fatal("expected err")
+ }
+
+ // Create a wrapping token
+ secret, err = client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret == nil || secret.WrapInfo == nil {
+ t.Fatal("secret or wrap info is nil")
+ }
+ wrapInfo = secret.WrapInfo
+
+ // Read response directly
+ client.SetToken(wrapInfo.Token)
+	secret, err = client.Logical().Read("cubbyhole/response")
+	if err != nil {
+		t.Fatal(err)
+	}
+ ret3 := secret
+ // Should be expired and fail
+ _, err = client.Logical().Write("cubbyhole/response", nil)
+ if err == nil {
+ t.Fatal("expected err")
+ }
+
+ // Create a wrapping token
+ client.SetToken(root)
+ secret, err = client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret == nil || secret.WrapInfo == nil {
+ t.Fatal("secret or wrap info is nil")
+ }
+ wrapInfo = secret.WrapInfo
+
+ // Read via Unwrap method
+	secret, err = client.Logical().Unwrap(wrapInfo.Token)
+	if err != nil {
+		t.Fatal(err)
+	}
+ ret4 := secret
+ // Should be expired and fail
+ _, err = client.Logical().Unwrap(wrapInfo.Token)
+ if err == nil {
+ t.Fatal("expected err")
+ }
+
+ if !reflect.DeepEqual(ret1.Data, map[string]interface{}{
+ "zip": "zap",
+ }) {
+ t.Fatalf("ret1 data did not match expected: %#v", ret1.Data)
+ }
+ if !reflect.DeepEqual(ret2.Data, map[string]interface{}{
+ "zip": "zap",
+ }) {
+ t.Fatalf("ret2 data did not match expected: %#v", ret2.Data)
+ }
+ var ret3Secret api.Secret
+ err = jsonutil.DecodeJSON([]byte(ret3.Data["response"].(string)), &ret3Secret)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(ret3Secret.Data, map[string]interface{}{
+ "zip": "zap",
+ }) {
+ t.Fatalf("ret3 data did not match expected: %#v", ret3Secret.Data)
+ }
+ if !reflect.DeepEqual(ret4.Data, map[string]interface{}{
+ "zip": "zap",
+ }) {
+ t.Fatalf("ret4 data did not match expected: %#v", ret4.Data)
+ }
+
+ //
+ // Custom wrapping
+ //
+
+ client.SetToken(root)
+ data := map[string]interface{}{
+ "zip": "zap",
+ "three": json.Number("2"),
+ }
+
+ // Don't set a request TTL on that path, should fail
+ client.SetWrappingLookupFunc(func(operation, path string) string {
+ return ""
+ })
+ secret, err = client.Logical().Write("sys/wrapping/wrap", data)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ // Re-set the lookup function
+ client.SetWrappingLookupFunc(func(operation, path string) string {
+ if operation == "GET" && path == "secret/foo" {
+ return "5m"
+ }
+
+ return api.DefaultWrappingLookupFunc(operation, path)
+ })
+ secret, err = client.Logical().Write("sys/wrapping/wrap", data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ secret, err = client.Logical().Unwrap(secret.WrapInfo.Token)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(data, secret.Data) {
+ t.Fatalf("custom wrap did not match expected: %#v", secret.Data)
+ }
+
+ //
+ // Test rewrap
+ //
+
+ // Create a wrapping token
+ secret, err = client.Logical().Read("secret/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret == nil || secret.WrapInfo == nil {
+ t.Fatal("secret or wrap info is nil")
+ }
+ wrapInfo = secret.WrapInfo
+
+ // Test rewrapping
+ secret, err = client.Logical().Write("sys/wrapping/rewrap", map[string]interface{}{
+ "token": wrapInfo.Token,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Should be expired and fail
+ _, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
+ "token": wrapInfo.Token,
+ })
+ if err == nil {
+ t.Fatal("expected err")
+ }
+
+ // Attempt unwrapping the rewrapped token
+ wrapToken := secret.WrapInfo.Token
+ secret, err = client.Logical().Unwrap(wrapToken)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Should be expired and fail
+ _, err = client.Logical().Unwrap(wrapToken)
+ if err == nil {
+ t.Fatal("expected err")
+ }
+
+ if !reflect.DeepEqual(secret.Data, map[string]interface{}{
+ "zip": "zap",
+ }) {
+ t.Fatalf("secret data did not match expected: %#v", secret.Data)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/http/testing.go b/vendor/github.com/hashicorp/vault/http/testing.go
new file mode 100644
index 0000000..543b3e6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/testing.go
@@ -0,0 +1,61 @@
+package http
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "testing"
+
+ "golang.org/x/net/http2"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestListener(t *testing.T) (net.Listener, string) {
+ fail := func(format string, args ...interface{}) {
+ panic(fmt.Sprintf(format, args...))
+ }
+ if t != nil {
+ fail = t.Fatalf
+ }
+
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ fail("err: %s", err)
+ }
+ addr := "http://" + ln.Addr().String()
+ return ln, addr
+}
+
+func TestServerWithListener(t *testing.T, ln net.Listener, addr string, core *vault.Core) {
+ // Create a muxer to handle our requests so that we can authenticate
+ // for tests.
+ mux := http.NewServeMux()
+ mux.Handle("/_test/auth", http.HandlerFunc(testHandleAuth))
+ mux.Handle("/", Handler(core))
+
+ server := &http.Server{
+ Addr: ln.Addr().String(),
+ Handler: mux,
+ }
+ if err := http2.ConfigureServer(server, nil); err != nil {
+ t.Fatal(err)
+ }
+ go server.Serve(ln)
+}
+
+func TestServer(t *testing.T, core *vault.Core) (net.Listener, string) {
+ ln, addr := TestListener(t)
+ TestServerWithListener(t, ln, addr, core)
+ return ln, addr
+}
+
+func TestServerAuth(t *testing.T, addr string, token string) {
+ if _, err := http.Get(addr + "/_test/auth?token=" + token); err != nil {
+ t.Fatalf("error authenticating: %s", err)
+ }
+}
+
+func testHandleAuth(w http.ResponseWriter, req *http.Request) {
+ respondOk(w, nil)
+}
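+
+// Illustrative usage from a test (a sketch, not part of the original file;
+// core and root-token setup are elided):
+//
+//	ln, addr := TestServer(t, core)
+//	defer ln.Close()
+//	TestServerAuth(t, addr, rootToken)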
diff --git a/vendor/github.com/hashicorp/vault/logical/auth.go b/vendor/github.com/hashicorp/vault/logical/auth.go
new file mode 100644
index 0000000..b454790
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/auth.go
@@ -0,0 +1,58 @@
+package logical
+
+import (
+ "fmt"
+ "time"
+)
+
+// Auth is the resulting authentication information that is part of
+// Response for credential backends.
+type Auth struct {
+ LeaseOptions
+
+ // InternalData is JSON-encodable data that is stored with the auth struct.
+ // This will be sent back during a Renew/Revoke for storing internal data
+ // used for those operations.
+ InternalData map[string]interface{} `json:"internal_data" mapstructure:"internal_data" structs:"internal_data"`
+
+ // DisplayName is a non-security sensitive identifier that is
+ // applicable to this Auth. It is used for logging and prefixing
+ // of dynamic secrets. For example, DisplayName may be "armon" for
+ // the github credential backend. If the client token is used to
+ // generate a SQL credential, the user may be "github-armon-uuid".
+ // This is to help identify the source without using audit tables.
+ DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"`
+
+ // Policies is the list of policies that the authenticated user
+ // is associated with.
+ Policies []string `json:"policies" mapstructure:"policies" structs:"policies"`
+
+ // Metadata is used to attach arbitrary string-type metadata to
+ // an authenticated user. This metadata will be outputted into the
+ // audit log.
+ Metadata map[string]string `json:"metadata" mapstructure:"metadata" structs:"metadata"`
+
+ // ClientToken is the token that is generated for the authentication.
+ // This will be filled in by Vault core when an auth structure is
+ // returned. Setting this manually will have no effect.
+ ClientToken string `json:"client_token" mapstructure:"client_token" structs:"client_token"`
+
+ // Accessor is the identifier for the ClientToken. This can be used
+ // to perform management functionalities (especially revocation) when
+ // ClientToken in the audit logs are obfuscated. Accessor can be used
+ // to revoke a ClientToken and to lookup the capabilities of the ClientToken,
+ // both without actually knowing the ClientToken.
+ Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor"`
+
+ // Period, if non-zero, indicates that the token generated using this
+ // Auth object should never expire, but instead should be renewed within
+ // the duration specified by this value.
+ Period time.Duration `json:"period" mapstructure:"period" structs:"period"`
+
+ // Number of allowed uses of the issued token
+ NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`
+}
+
+func (a *Auth) GoString() string {
+ return fmt.Sprintf("*%#v", *a)
+}
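+
+// Illustrative construction of an Auth as a credential backend might return
+// it (a sketch; the field values here are hypothetical):
+//
+//	auth := &Auth{
+//		DisplayName: "armon",
+//		Policies:    []string{"default", "dev"},
+//		Metadata:    map[string]string{"org": "hashicorp"},
+//	}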
diff --git a/vendor/github.com/hashicorp/vault/logical/connection.go b/vendor/github.com/hashicorp/vault/logical/connection.go
new file mode 100644
index 0000000..d54a0f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/connection.go
@@ -0,0 +1,15 @@
+package logical
+
+import (
+ "crypto/tls"
+)
+
+// Connection represents the connection information for a request. This
+// is present on the Request structure for credential backends.
+type Connection struct {
+ // RemoteAddr is the network address that sent the request.
+ RemoteAddr string `json:"remote_addr"`
+
+ // ConnState is the TLS connection state if applicable.
+ ConnState *tls.ConnectionState
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/error.go b/vendor/github.com/hashicorp/vault/logical/error.go
new file mode 100644
index 0000000..19e3e2d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/error.go
@@ -0,0 +1,47 @@
+package logical
+
+type HTTPCodedError interface {
+ Error() string
+ Code() int
+}
+
+func CodedError(c int, s string) HTTPCodedError {
+ return &codedError{s, c}
+}
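+
+// For example (illustrative), a handler can surface a specific HTTP status:
+//
+//	return nil, CodedError(400, "missing required field 'name'")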
+
+type codedError struct {
+ s string
+ code int
+}
+
+func (e *codedError) Error() string {
+ return e.s
+}
+
+func (e *codedError) Code() int {
+ return e.code
+}
+
+// StatusBadRequest identifies user input errors. This is helpful for
+// responding with the appropriate status codes to clients from the HTTP
+// endpoints.
+type StatusBadRequest struct {
+ Err string
+}
+
+// Implementing error interface
+func (s *StatusBadRequest) Error() string {
+ return s.Err
+}
+
+// ReplicationCodedError is declared as a separate type to avoid potential
+// compatibility problems if the logic around the HTTPCodedError interface
+// changes; in particular, for logical request paths it is basically ignored,
+// and changing that behavior might cause unforeseen issues.
+type ReplicationCodedError struct {
+ Msg string
+ Code int
+}
+
+func (r *ReplicationCodedError) Error() string {
+ return r.Msg
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/backend.go b/vendor/github.com/hashicorp/vault/logical/framework/backend.go
new file mode 100644
index 0000000..e94ea04
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/backend.go
@@ -0,0 +1,626 @@
+package framework
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+// Backend is an implementation of logical.Backend that allows
+// the implementer to code a backend using a much more programmer-friendly
+// framework that handles a lot of the routing and validation for you.
+//
+// This is recommended over implementing logical.Backend directly.
+type Backend struct {
+ // Help is the help text that is shown when a help request is made
+ // on the root of this resource. The root help is special since we
+ // show all the paths that can be requested.
+ Help string
+
+ // Paths are the various routes that the backend responds to.
+ // This cannot be modified after construction (i.e. dynamically changing
+ // paths, including adding or removing, is not allowed once the
+ // backend is in use).
+ //
+ // PathsSpecial is the list of path patterns that denote the
+ // paths above that require special privileges. These can't be
+ // regular expressions; it is either an exact match or a prefix match.
+ // For a prefix match, append '*' as a suffix.
+ Paths []*Path
+ PathsSpecial *logical.Paths
+
+ // Secrets is the list of secret types that this backend can
+ // return. It is used to automatically generate proper responses,
+ // and ease specifying callbacks for revocation, renewal, etc.
+ Secrets []*Secret
+
+ // PeriodicFunc is the callback which, if set, will be invoked when the
+ // periodic timer of RollbackManager ticks. This can be used by
+ // backends to do anything they wish to do periodically.
+ //
+ // PeriodicFunc can be invoked, say, to periodically delete stale
+ // entries in the backend's storage, while the backend is still in use.
+ // (Note how this differs from `Clean`, which is invoked just before
+ // the backend is unmounted.)
+ PeriodicFunc periodicFunc
+
+ // WALRollback is called when a WAL entry (see wal.go) has to be rolled
+ // back. It is called with the data from the entry.
+ //
+ // WALRollbackMinAge is the minimum age of a WAL entry before it is attempted
+ // to be rolled back. This should be longer than the maximum time it takes
+ // to successfully create a secret.
+ WALRollback WALRollbackFunc
+ WALRollbackMinAge time.Duration
+
+ // Clean is called on unload to clean up, e.g., any existing connections
+ // to the backend, if required.
+ Clean CleanupFunc
+
+ // Initialize is called after a backend is created. Storage should not be
+ // written to before this function is called.
+ Init InitializeFunc
+
+ // Invalidate is called, if set, when a key is modified
+ Invalidate InvalidateFunc
+
+ // AuthRenew is the callback to call when a RenewRequest for an
+ // authentication comes in. By default, renewal won't be allowed.
+ // See the built-in AuthRenew helpers in lease.go for common callbacks.
+ AuthRenew OperationFunc
+
+ logger log.Logger
+ system logical.SystemView
+ once sync.Once
+ pathsRe []*regexp.Regexp
+}
+
+// periodicFunc is the callback called when the RollbackManager's timer ticks.
+// This can be utilized by the backends to do anything they want.
+type periodicFunc func(*logical.Request) error
+
+// OperationFunc is the callback called for an operation on a path.
+type OperationFunc func(*logical.Request, *FieldData) (*logical.Response, error)
+
+// WALRollbackFunc is the callback for rollbacks.
+type WALRollbackFunc func(*logical.Request, string, interface{}) error
+
+// CleanupFunc is the callback for backend unload.
+type CleanupFunc func()
+
+// InitializeFunc is the callback for backend creation.
+type InitializeFunc func() error
+
+// InvalidateFunc is the callback for backend key invalidation.
+type InvalidateFunc func(string)
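+
+// A minimal illustrative Backend wiring (a sketch; the "info" pattern and
+// the handler body are hypothetical):
+//
+//	b := &Backend{
+//		Paths: []*Path{{
+//			Pattern: "info",
+//			Callbacks: map[logical.Operation]OperationFunc{
+//				logical.ReadOperation: func(req *logical.Request, d *FieldData) (*logical.Response, error) {
+//					return &logical.Response{Data: map[string]interface{}{"ok": true}}, nil
+//				},
+//			},
+//		}},
+//	}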
+
+func (b *Backend) HandleExistenceCheck(req *logical.Request) (checkFound bool, exists bool, err error) {
+ b.once.Do(b.init)
+
+ // Ensure we are only doing this when one of the correct operations is in play
+ switch req.Operation {
+ case logical.CreateOperation:
+ case logical.UpdateOperation:
+ default:
+ return false, false, fmt.Errorf("incorrect operation type %v for an existence check", req.Operation)
+ }
+
+ // Find the matching route
+ path, captures := b.route(req.Path)
+ if path == nil {
+ return false, false, logical.ErrUnsupportedPath
+ }
+
+ if path.ExistenceCheck == nil {
+ return false, false, nil
+ }
+
+ checkFound = true
+
+ // Build up the data for the route, with the URL taking priority
+ // for the fields over the PUT data.
+ raw := make(map[string]interface{}, len(path.Fields))
+ for k, v := range req.Data {
+ raw[k] = v
+ }
+ for k, v := range captures {
+ raw[k] = v
+ }
+
+ fd := FieldData{
+ Raw: raw,
+ Schema: path.Fields}
+
+ err = fd.Validate()
+ if err != nil {
+ return false, false, errutil.UserError{Err: err.Error()}
+ }
+
+ // Call the callback with the request and the data
+ exists, err = path.ExistenceCheck(req, &fd)
+ return
+}
+
+// logical.Backend impl.
+func (b *Backend) HandleRequest(req *logical.Request) (*logical.Response, error) {
+ b.once.Do(b.init)
+
+ // Check for special cased global operations. These don't route
+ // to a specific Path.
+ switch req.Operation {
+ case logical.RenewOperation:
+ fallthrough
+ case logical.RevokeOperation:
+ return b.handleRevokeRenew(req)
+ case logical.RollbackOperation:
+ return b.handleRollback(req)
+ }
+
+ // If the path is empty and it is a help operation, handle that.
+ if req.Path == "" && req.Operation == logical.HelpOperation {
+ return b.handleRootHelp()
+ }
+
+ // Find the matching route
+ path, captures := b.route(req.Path)
+ if path == nil {
+ return nil, logical.ErrUnsupportedPath
+ }
+
+ // Build up the data for the route, with the URL taking priority
+ // for the fields over the PUT data.
+ raw := make(map[string]interface{}, len(path.Fields))
+ for k, v := range req.Data {
+ raw[k] = v
+ }
+ for k, v := range captures {
+ raw[k] = v
+ }
+
+ // Look up the callback for this operation
+ var callback OperationFunc
+ var ok bool
+ if path.Callbacks != nil {
+ callback, ok = path.Callbacks[req.Operation]
+ }
+ if !ok {
+ if req.Operation == logical.HelpOperation {
+ callback = path.helpCallback
+ ok = true
+ }
+ }
+ if !ok {
+ return nil, logical.ErrUnsupportedOperation
+ }
+
+ fd := FieldData{
+ Raw: raw,
+ Schema: path.Fields}
+
+ if req.Operation != logical.HelpOperation {
+ err := fd.Validate()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Call the callback with the request and the data
+ return callback(req, &fd)
+}
+
+// logical.Backend impl.
+func (b *Backend) SpecialPaths() *logical.Paths {
+ return b.PathsSpecial
+}
+
+// Setup is used to initialize the backend with the initial backend configuration
+func (b *Backend) Setup(config *logical.BackendConfig) (logical.Backend, error) {
+ b.logger = config.Logger
+ b.system = config.System
+ return b, nil
+}
+
+// Cleanup is used to release resources and prepare to stop the backend
+func (b *Backend) Cleanup() {
+ if b.Clean != nil {
+ b.Clean()
+ }
+}
+
+func (b *Backend) Initialize() error {
+ if b.Init != nil {
+ return b.Init()
+ }
+
+ return nil
+}
+
+// InvalidateKey is used to clear caches and reset internal state on key changes
+func (b *Backend) InvalidateKey(key string) {
+ if b.Invalidate != nil {
+ b.Invalidate(key)
+ }
+}
+
+// Logger can be used to get the logger. If no logger has been set,
+// the logs will be discarded.
+func (b *Backend) Logger() log.Logger {
+ if b.logger != nil {
+ return b.logger
+ }
+
+ return logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelOff)
+}
+
+func (b *Backend) System() logical.SystemView {
+ return b.system
+}
+
+// SanitizeTTLStr takes the TTL and MaxTTL values provided by the user and
+// compares those with the SystemView values. If they are empty, a value of 0
+// is set, which will cause initial secret or LeaseExtend operations to use
+// the mount/system defaults. If they are set, their boundaries are validated.
+func (b *Backend) SanitizeTTLStr(ttlStr, maxTTLStr string) (ttl, maxTTL time.Duration, err error) {
+ if len(ttlStr) == 0 || ttlStr == "0" {
+ ttl = 0
+ } else {
+ ttl, err = time.ParseDuration(ttlStr)
+ if err != nil {
+ return 0, 0, fmt.Errorf("Invalid ttl: %s", err)
+ }
+ }
+
+ if len(maxTTLStr) == 0 || maxTTLStr == "0" {
+ maxTTL = 0
+ } else {
+ maxTTL, err = time.ParseDuration(maxTTLStr)
+ if err != nil {
+ return 0, 0, fmt.Errorf("Invalid max_ttl: %s", err)
+ }
+ }
+
+ ttl, maxTTL, err = b.SanitizeTTL(ttl, maxTTL)
+
+ return
+}
+
+// SanitizeTTL caps the ttl and max_ttl values to the backend mount's max_ttl value.
+func (b *Backend) SanitizeTTL(ttl, maxTTL time.Duration) (time.Duration, time.Duration, error) {
+ sysMaxTTL := b.System().MaxLeaseTTL()
+ if ttl > sysMaxTTL {
+ return 0, 0, fmt.Errorf("\"ttl\" value must be less than allowed max lease TTL value '%s'", sysMaxTTL.String())
+ }
+ if maxTTL > sysMaxTTL {
+ return 0, 0, fmt.Errorf("\"max_ttl\" value must be less than allowed max lease TTL value '%s'", sysMaxTTL.String())
+ }
+ if ttl > maxTTL && maxTTL != 0 {
+ ttl = maxTTL
+ }
+ return ttl, maxTTL, nil
+}
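+
+// Illustrative call from a role-creation handler (a sketch; `d` is the
+// handler's *FieldData and the field names are hypothetical):
+//
+//	ttl, maxTTL, err := b.SanitizeTTLStr(d.Get("ttl").(string), d.Get("max_ttl").(string))
+//	if err != nil {
+//		return logical.ErrorResponse(err.Error()), nil
+//	}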
+
+// Route looks up the path that would be used for a given path string.
+func (b *Backend) Route(path string) *Path {
+ result, _ := b.route(path)
+ return result
+}
+
+// Secret is used to look up the secret with the given type.
+func (b *Backend) Secret(k string) *Secret {
+ for _, s := range b.Secrets {
+ if s.Type == k {
+ return s
+ }
+ }
+
+ return nil
+}
+
+func (b *Backend) init() {
+ b.pathsRe = make([]*regexp.Regexp, len(b.Paths))
+ for i, p := range b.Paths {
+ if len(p.Pattern) == 0 {
+ panic("Routing pattern cannot be blank")
+ }
+ // Automatically anchor the pattern
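+ // (e.g. a Pattern of "secret/.*" is compiled as "^secret/.*$")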
+ if p.Pattern[0] != '^' {
+ p.Pattern = "^" + p.Pattern
+ }
+ if p.Pattern[len(p.Pattern)-1] != '$' {
+ p.Pattern = p.Pattern + "$"
+ }
+ b.pathsRe[i] = regexp.MustCompile(p.Pattern)
+ }
+}
+
+func (b *Backend) route(path string) (*Path, map[string]string) {
+ b.once.Do(b.init)
+
+ for i, re := range b.pathsRe {
+ matches := re.FindStringSubmatch(path)
+ if matches == nil {
+ continue
+ }
+
+ // We have a match, determine the mapping of the captures and
+ // store that for returning.
+ var captures map[string]string
+ path := b.Paths[i]
+ if captureNames := re.SubexpNames(); len(captureNames) > 1 {
+ captures = make(map[string]string, len(captureNames))
+ for i, name := range captureNames {
+ if name != "" {
+ captures[name] = matches[i]
+ }
+ }
+ }
+
+ return path, captures
+ }
+
+ return nil, nil
+}
+
+func (b *Backend) handleRootHelp() (*logical.Response, error) {
+ // Build a mapping of the paths and get the paths alphabetized to
+ // make the output prettier.
+ pathsMap := make(map[string]*Path)
+ paths := make([]string, 0, len(b.Paths))
+ for i, p := range b.pathsRe {
+ paths = append(paths, p.String())
+ pathsMap[p.String()] = b.Paths[i]
+ }
+ sort.Strings(paths)
+
+ // Build the path data
+ pathData := make([]rootHelpTemplatePath, 0, len(paths))
+ for _, route := range paths {
+ p := pathsMap[route]
+ pathData = append(pathData, rootHelpTemplatePath{
+ Path: route,
+ Help: strings.TrimSpace(p.HelpSynopsis),
+ })
+ }
+
+ help, err := executeTemplate(rootHelpTemplate, &rootHelpTemplateData{
+ Help: strings.TrimSpace(b.Help),
+ Paths: pathData,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.HelpResponse(help, nil), nil
+}
+
+func (b *Backend) handleRevokeRenew(
+ req *logical.Request) (*logical.Response, error) {
+ // Special case renewal of authentication for credential backends
+ if req.Operation == logical.RenewOperation && req.Auth != nil {
+ return b.handleAuthRenew(req)
+ }
+
+ if req.Secret == nil {
+ return nil, fmt.Errorf("request has no secret")
+ }
+
+ rawSecretType, ok := req.Secret.InternalData["secret_type"]
+ if !ok {
+ return nil, fmt.Errorf("secret is unsupported by this backend")
+ }
+ secretType, ok := rawSecretType.(string)
+ if !ok {
+ return nil, fmt.Errorf("secret is unsupported by this backend")
+ }
+
+ secret := b.Secret(secretType)
+ if secret == nil {
+ return nil, fmt.Errorf("secret is unsupported by this backend")
+ }
+
+ switch req.Operation {
+ case logical.RenewOperation:
+ return secret.HandleRenew(req)
+ case logical.RevokeOperation:
+ return secret.HandleRevoke(req)
+ default:
+ return nil, fmt.Errorf(
+ "invalid operation for revoke/renew: %s", req.Operation)
+ }
+}
+
+// handleRollback invokes the PeriodicFunc set on the backend. It also does a WAL rollback operation.
+func (b *Backend) handleRollback(
+ req *logical.Request) (*logical.Response, error) {
+ // Response is not expected from the periodic operation.
+ if b.PeriodicFunc != nil {
+ if err := b.PeriodicFunc(req); err != nil {
+ return nil, err
+ }
+ }
+
+ return b.handleWALRollback(req)
+}
+
+func (b *Backend) handleAuthRenew(req *logical.Request) (*logical.Response, error) {
+ if b.AuthRenew == nil {
+ return logical.ErrorResponse("this auth type doesn't support renew"), nil
+ }
+
+ return b.AuthRenew(req, nil)
+}
+
+func (b *Backend) handleWALRollback(
+ req *logical.Request) (*logical.Response, error) {
+ if b.WALRollback == nil {
+ return nil, logical.ErrUnsupportedOperation
+ }
+
+ var merr error
+ keys, err := ListWAL(req.Storage)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+ if len(keys) == 0 {
+ return nil, nil
+ }
+
+ // Calculate the minimum time that the WAL entries could be
+ // created in order to be rolled back.
+ age := b.WALRollbackMinAge
+ if age == 0 {
+ age = 10 * time.Minute
+ }
+ minAge := time.Now().Add(-1 * age)
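+ // An "immediate" request pushes minAge into the future so that every
+ // existing WAL entry is considered old enough to roll back now.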
+ if _, ok := req.Data["immediate"]; ok {
+ minAge = time.Now().Add(1000 * time.Hour)
+ }
+
+ for _, k := range keys {
+ entry, err := GetWAL(req.Storage, k)
+ if err != nil {
+ merr = multierror.Append(merr, err)
+ continue
+ }
+ if entry == nil {
+ continue
+ }
+
+ // If the entry isn't old enough, then don't roll it back
+ if !time.Unix(entry.CreatedAt, 0).Before(minAge) {
+ continue
+ }
+
+ // Attempt a WAL rollback
+ err = b.WALRollback(req, entry.Kind, entry.Data)
+ if err != nil {
+ err = fmt.Errorf(
+ "Error rolling back '%s' entry: %s", entry.Kind, err)
+ }
+ if err == nil {
+ err = DeleteWAL(req.Storage, k)
+ }
+ if err != nil {
+ merr = multierror.Append(merr, err)
+ }
+ }
+
+ if merr == nil {
+ return nil, nil
+ }
+
+ return logical.ErrorResponse(merr.Error()), nil
+}
+
+// FieldSchema is a basic schema to describe the format of a path field.
+type FieldSchema struct {
+ Type FieldType
+ Default interface{}
+ Description string
+}
+
+// DefaultOrZero returns the default value if it is set, or otherwise
+// the zero value of the type.
+func (s *FieldSchema) DefaultOrZero() interface{} {
+ if s.Default != nil {
+ switch s.Type {
+ case TypeDurationSecond:
+ var result int
+ switch inp := s.Default.(type) {
+ case nil:
+ return s.Type.Zero()
+ case int:
+ result = inp
+ case int64:
+ result = int(inp)
+ case float32:
+ result = int(inp)
+ case float64:
+ result = int(inp)
+ case string:
+ dur, err := parseutil.ParseDurationSecond(inp)
+ if err != nil {
+ return s.Type.Zero()
+ }
+ result = int(dur.Seconds())
+ case json.Number:
+ valInt64, err := inp.Int64()
+ if err != nil {
+ return s.Type.Zero()
+ }
+ result = int(valInt64)
+ default:
+ return s.Type.Zero()
+ }
+ return result
+
+ default:
+ return s.Default
+ }
+ }
+
+ return s.Type.Zero()
+}
+
+func (t FieldType) Zero() interface{} {
+ switch t {
+ case TypeString:
+ return ""
+ case TypeInt:
+ return 0
+ case TypeBool:
+ return false
+ case TypeMap:
+ return map[string]interface{}{}
+ case TypeDurationSecond:
+ return 0
+ case TypeSlice:
+ return []interface{}{}
+ case TypeStringSlice, TypeCommaStringSlice:
+ return []string{}
+ default:
+ panic("unknown type: " + t.String())
+ }
+}
+
+type rootHelpTemplateData struct {
+ Help string
+ Paths []rootHelpTemplatePath
+}
+
+type rootHelpTemplatePath struct {
+ Path string
+ Help string
+}
+
+const rootHelpTemplate = `
+## DESCRIPTION
+
+{{.Help}}
+
+## PATHS
+
+The following paths are supported by this backend. To view help for
+any of the paths below, use the help command with any route matching
+the path pattern. Note that depending on the policy of your auth token,
+you may or may not be able to access certain paths.
+
+{{range .Paths}}{{indent 4 .Path}}
+{{indent 8 .Help}}
+
+{{end}}
+
+`
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/backend_test.go b/vendor/github.com/hashicorp/vault/logical/framework/backend_test.go
new file mode 100644
index 0000000..d94beed
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/backend_test.go
@@ -0,0 +1,580 @@
+package framework
+
+import (
+ "reflect"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func BenchmarkBackendRoute(b *testing.B) {
+ patterns := []string{
+ "foo",
+ "bar/(?P.+?)",
+ "baz/(?Pwhat)",
+ `aws/policy/(?P\w)`,
+ `aws/(?P\w)`,
+ }
+
+ backend := &Backend{Paths: make([]*Path, 0, len(patterns))}
+ for _, p := range patterns {
+ backend.Paths = append(backend.Paths, &Path{Pattern: p})
+ }
+
+ // Warm any caches
+ backend.Route("aws/policy/foo")
+
+ // Reset the timer since we did a lot above
+ b.ResetTimer()
+
+ // Run through and route. We do a sanity check of the return value
+ for i := 0; i < b.N; i++ {
+ if p := backend.Route("aws/policy/foo"); p == nil {
+ b.Fatal("p should not be nil")
+ }
+ }
+}
+
+func TestBackend_impl(t *testing.T) {
+ var _ logical.Backend = new(Backend)
+}
+
+func TestBackendHandleRequest(t *testing.T) {
+ callback := func(req *logical.Request, data *FieldData) (*logical.Response, error) {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "value": data.Get("value"),
+ },
+ }, nil
+ }
+
+ b := &Backend{
+ Paths: []*Path{
+ &Path{
+ Pattern: "foo/bar",
+ Fields: map[string]*FieldSchema{
+ "value": &FieldSchema{Type: TypeInt},
+ },
+ Callbacks: map[logical.Operation]OperationFunc{
+ logical.ReadOperation: callback,
+ },
+ },
+ },
+ }
+
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "foo/bar",
+ Data: map[string]interface{}{"value": "42"},
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp.Data["value"] != 42 {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestBackendHandleRequest_badwrite(t *testing.T) {
+ callback := func(req *logical.Request, data *FieldData) (*logical.Response, error) {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "value": data.Get("value").(bool),
+ },
+ }, nil
+ }
+
+ b := &Backend{
+ Paths: []*Path{
+ &Path{
+ Pattern: "foo/bar",
+ Fields: map[string]*FieldSchema{
+ "value": &FieldSchema{Type: TypeBool},
+ },
+ Callbacks: map[logical.Operation]OperationFunc{
+ logical.UpdateOperation: callback,
+ },
+ },
+ },
+ }
+
+ _, err := b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "foo/bar",
+ Data: map[string]interface{}{"value": "3false3"},
+ })
+
+ if err == nil {
+ t.Fatalf("should have thrown a conversion error")
+ }
+
+}
+
+func TestBackendHandleRequest_404(t *testing.T) {
+ callback := func(req *logical.Request, data *FieldData) (*logical.Response, error) {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "value": data.Get("value"),
+ },
+ }, nil
+ }
+
+ b := &Backend{
+ Paths: []*Path{
+ &Path{
+ Pattern: `foo/bar`,
+ Fields: map[string]*FieldSchema{
+ "value": &FieldSchema{Type: TypeInt},
+ },
+ Callbacks: map[logical.Operation]OperationFunc{
+ logical.ReadOperation: callback,
+ },
+ },
+ },
+ }
+
+ _, err := b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "foo/baz",
+ Data: map[string]interface{}{"value": "84"},
+ })
+ if err != logical.ErrUnsupportedPath {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestBackendHandleRequest_help(t *testing.T) {
+ b := &Backend{
+ Paths: []*Path{
+ &Path{
+ Pattern: "foo/bar",
+ Fields: map[string]*FieldSchema{
+ "value": &FieldSchema{Type: TypeInt},
+ },
+ HelpSynopsis: "foo",
+ HelpDescription: "bar",
+ },
+ },
+ }
+
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.HelpOperation,
+ Path: "foo/bar",
+ Data: map[string]interface{}{"value": "42"},
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp.Data["help"] == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestBackendHandleRequest_helpRoot(t *testing.T) {
+ b := &Backend{
+ Help: "42",
+ }
+
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.HelpOperation,
+ Path: "",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp.Data["help"] == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestBackendHandleRequest_renewAuth(t *testing.T) {
+ b := &Backend{}
+
+ resp, err := b.HandleRequest(logical.RenewAuthRequest(
+ "/foo", &logical.Auth{}, nil))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if !resp.IsError() {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestBackendHandleRequest_renewAuthCallback(t *testing.T) {
+ var called uint32
+ callback := func(*logical.Request, *FieldData) (*logical.Response, error) {
+ atomic.AddUint32(&called, 1)
+ return nil, nil
+ }
+
+ b := &Backend{
+ AuthRenew: callback,
+ }
+
+ _, err := b.HandleRequest(logical.RenewAuthRequest(
+ "/foo", &logical.Auth{}, nil))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if v := atomic.LoadUint32(&called); v != 1 {
+ t.Fatalf("bad: %#v", v)
+ }
+}
+
+func TestBackendHandleRequest_renew(t *testing.T) {
+ var called uint32
+ callback := func(*logical.Request, *FieldData) (*logical.Response, error) {
+ atomic.AddUint32(&called, 1)
+ return nil, nil
+ }
+
+ secret := &Secret{
+ Type: "foo",
+ Renew: callback,
+ }
+ b := &Backend{
+ Secrets: []*Secret{secret},
+ }
+
+ _, err := b.HandleRequest(logical.RenewRequest(
+ "/foo", secret.Response(nil, nil).Secret, nil))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if v := atomic.LoadUint32(&called); v != 1 {
+ t.Fatalf("bad: %#v", v)
+ }
+}
+
+func TestBackendHandleRequest_renewExtend(t *testing.T) {
+ sysView := logical.StaticSystemView{
+ DefaultLeaseTTLVal: 5 * time.Minute,
+ MaxLeaseTTLVal: 30 * time.Hour,
+ }
+
+ secret := &Secret{
+ Type: "foo",
+ Renew: LeaseExtend(0, 0, sysView),
+ DefaultDuration: 5 * time.Minute,
+ }
+ b := &Backend{
+ Secrets: []*Secret{secret},
+ }
+
+ req := logical.RenewRequest("/foo", secret.Response(nil, nil).Secret, nil)
+ req.Secret.IssueTime = time.Now()
+ req.Secret.Increment = 1 * time.Hour
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp == nil || resp.Secret == nil {
+ t.Fatal("should have secret")
+ }
+
+ if resp.Secret.TTL < 59*time.Minute || resp.Secret.TTL > 61*time.Minute {
+ t.Fatalf("bad: %s", resp.Secret.TTL)
+ }
+}
+
+func TestBackendHandleRequest_revoke(t *testing.T) {
+ var called uint32
+ callback := func(*logical.Request, *FieldData) (*logical.Response, error) {
+ atomic.AddUint32(&called, 1)
+ return nil, nil
+ }
+
+ secret := &Secret{
+ Type: "foo",
+ Revoke: callback,
+ }
+ b := &Backend{
+ Secrets: []*Secret{secret},
+ }
+
+ _, err := b.HandleRequest(logical.RevokeRequest(
+ "/foo", secret.Response(nil, nil).Secret, nil))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if v := atomic.LoadUint32(&called); v != 1 {
+ t.Fatalf("bad: %#v", v)
+ }
+}
+
+func TestBackendHandleRequest_rollback(t *testing.T) {
+ var called uint32
+ callback := func(req *logical.Request, kind string, data interface{}) error {
+ if data == "foo" {
+ atomic.AddUint32(&called, 1)
+ }
+
+ return nil
+ }
+
+ b := &Backend{
+ WALRollback: callback,
+ WALRollbackMinAge: 1 * time.Millisecond,
+ }
+
+ storage := new(logical.InmemStorage)
+ if _, err := PutWAL(storage, "kind", "foo"); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ time.Sleep(10 * time.Millisecond)
+
+ _, err := b.HandleRequest(&logical.Request{
+ Operation: logical.RollbackOperation,
+ Path: "",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if v := atomic.LoadUint32(&called); v != 1 {
+ t.Fatalf("bad: %#v", v)
+ }
+}
+
+func TestBackendHandleRequest_rollbackMinAge(t *testing.T) {
+ var called uint32
+ callback := func(req *logical.Request, kind string, data interface{}) error {
+ if data == "foo" {
+ atomic.AddUint32(&called, 1)
+ }
+
+ return nil
+ }
+
+ b := &Backend{
+ WALRollback: callback,
+ WALRollbackMinAge: 5 * time.Second,
+ }
+
+ storage := new(logical.InmemStorage)
+ if _, err := PutWAL(storage, "kind", "foo"); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ _, err := b.HandleRequest(&logical.Request{
+ Operation: logical.RollbackOperation,
+ Path: "",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if v := atomic.LoadUint32(&called); v != 0 {
+ t.Fatalf("bad: %#v", v)
+ }
+}
+
+func TestBackendHandleRequest_unsupportedOperation(t *testing.T) {
+ callback := func(req *logical.Request, data *FieldData) (*logical.Response, error) {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "value": data.Get("value"),
+ },
+ }, nil
+ }
+
+ b := &Backend{
+ Paths: []*Path{
+ &Path{
+ Pattern: `foo/bar`,
+ Fields: map[string]*FieldSchema{
+ "value": &FieldSchema{Type: TypeInt},
+ },
+ Callbacks: map[logical.Operation]OperationFunc{
+ logical.ReadOperation: callback,
+ },
+ },
+ },
+ }
+
+ _, err := b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "foo/bar",
+ Data: map[string]interface{}{"value": "84"},
+ })
+ if err != logical.ErrUnsupportedOperation {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestBackendHandleRequest_urlPriority(t *testing.T) {
+ callback := func(req *logical.Request, data *FieldData) (*logical.Response, error) {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "value": data.Get("value"),
+ },
+ }, nil
+ }
+
+ b := &Backend{
+ Paths: []*Path{
+ &Path{
+ Pattern: `foo/(?P<value>\d+)`,
+ Fields: map[string]*FieldSchema{
+ "value": &FieldSchema{Type: TypeInt},
+ },
+ Callbacks: map[logical.Operation]OperationFunc{
+ logical.ReadOperation: callback,
+ },
+ },
+ },
+ }
+
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "foo/42",
+ Data: map[string]interface{}{"value": "84"},
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp.Data["value"] != 42 {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestBackendRoute(t *testing.T) {
+ cases := map[string]struct {
+ Patterns []string
+ Path string
+ Match string
+ }{
+ "no match": {
+ []string{"foo"},
+ "bar",
+ "",
+ },
+
+ "exact": {
+ []string{"foo"},
+ "foo",
+ "^foo$",
+ },
+
+ "regexp": {
+ []string{"fo+"},
+ "foo",
+ "^fo+$",
+ },
+
+ "anchor-start": {
+ []string{"bar"},
+ "foobar",
+ "",
+ },
+
+ "anchor-end": {
+ []string{"bar"},
+ "barfoo",
+ "",
+ },
+
+ "anchor-ambiguous": {
+ []string{"mounts", "sys/mounts"},
+ "sys/mounts",
+ "^sys/mounts$",
+ },
+ }
+
+ for n, tc := range cases {
+ paths := make([]*Path, len(tc.Patterns))
+ for i, pattern := range tc.Patterns {
+ paths[i] = &Path{Pattern: pattern}
+ }
+
+ b := &Backend{Paths: paths}
+ result := b.Route(tc.Path)
+ match := ""
+ if result != nil {
+ match = result.Pattern
+ }
+
+ if match != tc.Match {
+ t.Fatalf("bad: %s\n\nExpected: %s\nGot: %s",
+ n, tc.Match, match)
+ }
+ }
+}
+
+func TestBackendSecret(t *testing.T) {
+ cases := map[string]struct {
+ Secrets []*Secret
+ Search string
+ Match bool
+ }{
+ "no match": {
+ []*Secret{&Secret{Type: "foo"}},
+ "bar",
+ false,
+ },
+
+ "match": {
+ []*Secret{&Secret{Type: "foo"}},
+ "foo",
+ true,
+ },
+ }
+
+ for n, tc := range cases {
+ b := &Backend{Secrets: tc.Secrets}
+ result := b.Secret(tc.Search)
+ if tc.Match != (result != nil) {
+ t.Fatalf("bad: %s\n\nExpected match: %v", n, tc.Match)
+ }
+ if result != nil && result.Type != tc.Search {
+ t.Fatalf("bad: %s\n\nExpected matching type: %#v", n, result)
+ }
+ }
+}
+
+func TestFieldSchemaDefaultOrZero(t *testing.T) {
+ cases := map[string]struct {
+ Schema *FieldSchema
+ Value interface{}
+ }{
+ "default set": {
+ &FieldSchema{Type: TypeString, Default: "foo"},
+ "foo",
+ },
+
+ "default not set": {
+ &FieldSchema{Type: TypeString},
+ "",
+ },
+
+ "default duration set": {
+ &FieldSchema{Type: TypeDurationSecond, Default: 60},
+ 60,
+ },
+
+ "default duration int64": {
+ &FieldSchema{Type: TypeDurationSecond, Default: int64(60)},
+ 60,
+ },
+
+ "default duration string": {
+ &FieldSchema{Type: TypeDurationSecond, Default: "60s"},
+ 60,
+ },
+
+ "default duration not set": {
+ &FieldSchema{Type: TypeDurationSecond},
+ 0,
+ },
+ }
+
+ for name, tc := range cases {
+ actual := tc.Schema.DefaultOrZero()
+ if !reflect.DeepEqual(actual, tc.Value) {
+ t.Fatalf("bad: %s\n\nExpected: %#v\nGot: %#v",
+ name, tc.Value, actual)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/field_data.go b/vendor/github.com/hashicorp/vault/logical/framework/field_data.go
new file mode 100644
index 0000000..9783802
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/field_data.go
@@ -0,0 +1,216 @@
+package framework
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/mitchellh/mapstructure"
+)
+
+// FieldData is the structure passed to the callback to handle a path
+// containing the populated parameters for fields. This should be used
+// instead of the raw (*vault.Request).Data to access data in a type-safe
+// way.
+type FieldData struct {
+ Raw map[string]interface{}
+ Schema map[string]*FieldSchema
+}
+
+// Cycle through raw data and validate conversions in
+// the schema, so we don't get an error/panic later when
+// trying to get data out. Data not in the schema is not
+// an error at this point, so we don't worry about it.
+func (d *FieldData) Validate() error {
+ for field, value := range d.Raw {
+
+ schema, ok := d.Schema[field]
+ if !ok {
+ continue
+ }
+
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString, TypeSlice,
+ TypeStringSlice, TypeCommaStringSlice:
+ _, _, err := d.getPrimitive(field, schema)
+ if err != nil {
+ return fmt.Errorf("Error converting input %v for field %s: %s", value, field, err)
+ }
+ default:
+ return fmt.Errorf("unknown field type %s for field %s",
+ schema.Type, field)
+ }
+ }
+
+ return nil
+}
+
+// Get gets the value for the given field. If the key is an invalid field,
+// FieldData will panic. If you want a safer version of this method, use
+// GetOk. If the field k is not set, the default value (if set) will be
+// returned, otherwise the zero value will be returned.
+func (d *FieldData) Get(k string) interface{} {
+ schema, ok := d.Schema[k]
+ if !ok {
+ panic(fmt.Sprintf("field %s not in the schema", k))
+ }
+
+ value, ok := d.GetOk(k)
+ if !ok {
+ value = schema.DefaultOrZero()
+ }
+
+ return value
+}
+
+// GetDefaultOrZero gets the default value set on the schema for the given
+// field. If there is no default value set, the zero value of the type
+// will be returned.
+func (d *FieldData) GetDefaultOrZero(k string) interface{} {
+ schema, ok := d.Schema[k]
+ if !ok {
+ panic(fmt.Sprintf("field %s not in the schema", k))
+ }
+
+ return schema.DefaultOrZero()
+}
+
+// GetOk gets the value for the given field. The second return value
+// will be false if the key is invalid or the key is not set at all.
+func (d *FieldData) GetOk(k string) (interface{}, bool) {
+ schema, ok := d.Schema[k]
+ if !ok {
+ return nil, false
+ }
+
+ result, ok, err := d.GetOkErr(k)
+ if err != nil {
+ panic(fmt.Sprintf("error reading %s: %s", k, err))
+ }
+
+ if ok && result == nil {
+ result = schema.DefaultOrZero()
+ }
+
+ return result, ok
+}
+
+// GetOkErr is the most conservative of all the Get methods. It returns
+// whether key is set or not, but also an error value. The error value is
+// non-nil if the field doesn't exist or there was an error parsing the
+// field value.
+func (d *FieldData) GetOkErr(k string) (interface{}, bool, error) {
+ schema, ok := d.Schema[k]
+ if !ok {
+ return nil, false, fmt.Errorf("unknown field: %s", k)
+ }
+
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString,
+ TypeSlice, TypeStringSlice, TypeCommaStringSlice:
+ return d.getPrimitive(k, schema)
+ default:
+ return nil, false,
+ fmt.Errorf("unknown field type %s for field %s", schema.Type, k)
+ }
+}
+
+func (d *FieldData) getPrimitive(
+ k string, schema *FieldSchema) (interface{}, bool, error) {
+ raw, ok := d.Raw[k]
+ if !ok {
+ return nil, false, nil
+ }
+
+ switch schema.Type {
+ case TypeBool:
+ var result bool
+ if err := mapstructure.WeakDecode(raw, &result); err != nil {
+ return nil, true, err
+ }
+ return result, true, nil
+
+ case TypeInt:
+ var result int
+ if err := mapstructure.WeakDecode(raw, &result); err != nil {
+ return nil, true, err
+ }
+ return result, true, nil
+
+ case TypeString:
+ var result string
+ if err := mapstructure.WeakDecode(raw, &result); err != nil {
+ return nil, true, err
+ }
+ return result, true, nil
+
+ case TypeMap:
+ var result map[string]interface{}
+ if err := mapstructure.WeakDecode(raw, &result); err != nil {
+ return nil, true, err
+ }
+ return result, true, nil
+
+ case TypeDurationSecond:
+ var result int
+ switch inp := raw.(type) {
+ case nil:
+ return nil, false, nil
+ case int:
+ result = inp
+ case float32:
+ result = int(inp)
+ case float64:
+ result = int(inp)
+ case string:
+ dur, err := parseutil.ParseDurationSecond(inp)
+ if err != nil {
+ return nil, true, err
+ }
+ result = int(dur.Seconds())
+ case json.Number:
+ valInt64, err := inp.Int64()
+ if err != nil {
+ return nil, true, err
+ }
+ result = int(valInt64)
+ default:
+ return nil, false, fmt.Errorf("invalid input '%v'", raw)
+ }
+ return result, true, nil
+
+ case TypeSlice:
+ var result []interface{}
+ if err := mapstructure.WeakDecode(raw, &result); err != nil {
+ return nil, true, err
+ }
+ return result, true, nil
+
+ case TypeStringSlice:
+ var result []string
+ if err := mapstructure.WeakDecode(raw, &result); err != nil {
+ return nil, true, err
+ }
+ return strutil.TrimStrings(result), true, nil
+
+ case TypeCommaStringSlice:
+ var result []string
+ config := &mapstructure.DecoderConfig{
+ Result: &result,
+ WeaklyTypedInput: true,
+ DecodeHook: mapstructure.StringToSliceHookFunc(","),
+ }
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+ return nil, false, err
+ }
+ if err := decoder.Decode(raw); err != nil {
+ return nil, false, err
+ }
+ return strutil.TrimStrings(result), true, nil
+
+ default:
+ panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+ }
+}
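+
+// An illustrative round trip (a sketch; schema and raw values hypothetical):
+//
+//	d := &FieldData{
+//		Raw:    map[string]interface{}{"ttl": "30m"},
+//		Schema: map[string]*FieldSchema{"ttl": {Type: TypeDurationSecond}},
+//	}
+//
+// Here d.Get("ttl") returns the int 1800 (the duration in seconds).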
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go b/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go
new file mode 100644
index 0000000..a801f9c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go
@@ -0,0 +1,263 @@
+package framework
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestFieldDataGet(t *testing.T) {
+ cases := map[string]struct {
+ Schema map[string]*FieldSchema
+ Raw map[string]interface{}
+ Key string
+ Value interface{}
+ }{
+ "string type, string value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeString},
+ },
+ map[string]interface{}{
+ "foo": "bar",
+ },
+ "foo",
+ "bar",
+ },
+
+ "string type, int value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeString},
+ },
+ map[string]interface{}{
+ "foo": 42,
+ },
+ "foo",
+ "42",
+ },
+
+ "string type, unset value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeString},
+ },
+ map[string]interface{}{},
+ "foo",
+ "",
+ },
+
+ "string type, unset value with default": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{
+ Type: TypeString,
+ Default: "bar",
+ },
+ },
+ map[string]interface{}{},
+ "foo",
+ "bar",
+ },
+
+ "int type, int value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeInt},
+ },
+ map[string]interface{}{
+ "foo": 42,
+ },
+ "foo",
+ 42,
+ },
+
+ "bool type, bool value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeBool},
+ },
+ map[string]interface{}{
+ "foo": false,
+ },
+ "foo",
+ false,
+ },
+
+ "map type, map value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeMap},
+ },
+ map[string]interface{}{
+ "foo": map[string]interface{}{
+ "child": true,
+ },
+ },
+ "foo",
+ map[string]interface{}{
+ "child": true,
+ },
+ },
+
+ "duration type, string value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeDurationSecond},
+ },
+ map[string]interface{}{
+ "foo": "42",
+ },
+ "foo",
+ 42,
+ },
+
+ "duration type, string duration value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeDurationSecond},
+ },
+ map[string]interface{}{
+ "foo": "42m",
+ },
+ "foo",
+ 2520,
+ },
+
+ "duration type, int value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeDurationSecond},
+ },
+ map[string]interface{}{
+ "foo": 42,
+ },
+ "foo",
+ 42,
+ },
+
+ "duration type, float value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeDurationSecond},
+ },
+ map[string]interface{}{
+ "foo": 42.0,
+ },
+ "foo",
+ 42,
+ },
+
+ "duration type, nil value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeDurationSecond},
+ },
+ map[string]interface{}{
+ "foo": nil,
+ },
+ "foo",
+ 0,
+ },
+
+ "slice type, empty slice": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeSlice},
+ },
+ map[string]interface{}{
+ "foo": []interface{}{},
+ },
+ "foo",
+ []interface{}{},
+ },
+
+ "slice type, filled, mixed slice": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeSlice},
+ },
+ map[string]interface{}{
+ "foo": []interface{}{123, "abc"},
+ },
+ "foo",
+ []interface{}{123, "abc"},
+ },
+
+ "string slice type, filled slice": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeStringSlice},
+ },
+ map[string]interface{}{
+ "foo": []interface{}{123, "abc"},
+ },
+ "foo",
+ []string{"123", "abc"},
+ },
+
+ "comma string slice type, comma string with one value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeCommaStringSlice},
+ },
+ map[string]interface{}{
+ "foo": "value1",
+ },
+ "foo",
+ []string{"value1"},
+ },
+
+ "comma string slice type, comma string with multi value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeCommaStringSlice},
+ },
+ map[string]interface{}{
+ "foo": "value1,value2,value3",
+ },
+ "foo",
+ []string{"value1", "value2", "value3"},
+ },
+
+ "comma string slice type, nil string slice value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeCommaStringSlice},
+ },
+ map[string]interface{}{
+ "foo": "",
+ },
+ "foo",
+ []string{},
+ },
+
+ "commma string slice type, string slice with one value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeCommaStringSlice},
+ },
+ map[string]interface{}{
+ "foo": []interface{}{"value1"},
+ },
+ "foo",
+ []string{"value1"},
+ },
+
+ "comma string slice type, string slice with multi value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeCommaStringSlice},
+ },
+ map[string]interface{}{
+ "foo": []interface{}{"value1", "value2", "value3"},
+ },
+ "foo",
+ []string{"value1", "value2", "value3"},
+ },
+
+ "comma string slice type, empty string slice value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeCommaStringSlice},
+ },
+ map[string]interface{}{
+ "foo": []interface{}{},
+ },
+ "foo",
+ []string{},
+ },
+ }
+
+ for name, tc := range cases {
+ data := &FieldData{
+ Raw: tc.Raw,
+ Schema: tc.Schema,
+ }
+
+ actual := data.Get(tc.Key)
+ if !reflect.DeepEqual(actual, tc.Value) {
+ t.Fatalf(
+ "bad: %s\n\nExpected: %#v\nGot: %#v",
+ name, tc.Value, actual)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/field_type.go b/vendor/github.com/hashicorp/vault/logical/framework/field_type.go
new file mode 100644
index 0000000..034d0fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/field_type.go
@@ -0,0 +1,45 @@
+package framework
+
+// FieldType is the enum of types that a field can be.
+type FieldType uint
+
+const (
+ TypeInvalid FieldType = 0
+ TypeString FieldType = iota
+ TypeInt
+ TypeBool
+ TypeMap
+
+ // TypeDurationSecond is represented as seconds; the input can be either
+ // an integer or a Go duration format string (e.g. 24h)
+ TypeDurationSecond
+
+ // TypeSlice represents a slice of any type
+ TypeSlice
+ // TypeStringSlice is a helper for TypeSlice that returns a sanitized
+ // slice of strings
+ TypeStringSlice
+ // TypeCommaStringSlice is a helper for TypeSlice that returns a sanitized
+ // slice of strings and also supports parsing a comma-separated list in
+ // a string field
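+ // (illustratively, the input "a, b,c" parses to []string{"a", "b", "c"})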
+ TypeCommaStringSlice
+)
+
+func (t FieldType) String() string {
+ switch t {
+ case TypeString:
+ return "string"
+ case TypeInt:
+ return "int"
+ case TypeBool:
+ return "bool"
+ case TypeMap:
+ return "map"
+ case TypeDurationSecond:
+ return "duration (sec)"
+ case TypeSlice, TypeStringSlice, TypeCommaStringSlice:
+ return "slice"
+ default:
+ return "unknown type"
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/lease.go b/vendor/github.com/hashicorp/vault/logical/framework/lease.go
new file mode 100644
index 0000000..4fd2ac9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/lease.go
@@ -0,0 +1,85 @@
+package framework
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+// LeaseExtend returns an OperationFunc that can be used to simply extend the
+// lease of the auth/secret for the duration that was requested.
+//
+// backendIncrement is the backend's requested increment -- perhaps from a user
+// request, perhaps from a role/config value. If not set, uses the mount/system
+// value.
+//
+// backendMax is the backend's requested maximum -- this can be more
+// restrictive than the mount/system value but not less.
+//
+// systemView is the system view from the calling backend, used to determine
+// and/or correct default/max times.
+func LeaseExtend(backendIncrement, backendMax time.Duration, systemView logical.SystemView) OperationFunc {
+ return func(req *logical.Request, data *FieldData) (*logical.Response, error) {
+ var leaseOpts *logical.LeaseOptions
+ switch {
+ case req.Auth != nil:
+ leaseOpts = &req.Auth.LeaseOptions
+ case req.Secret != nil:
+ leaseOpts = &req.Secret.LeaseOptions
+ default:
+ return nil, fmt.Errorf("no lease options for request")
+ }
+
+ // Use the mount's configured max unless the backend specifies
+ // something more restrictive (perhaps from a role configuration
+ // parameter)
+ max := systemView.MaxLeaseTTL()
+ if backendMax > 0 && backendMax < max {
+ max = backendMax
+ }
+
+ // Should never happen, but guard anyways
+ if max < 0 {
+ return nil, fmt.Errorf("max TTL is negative")
+ }
+
+ // We cannot go past this time
+ maxValidTime := leaseOpts.IssueTime.Add(max)
+
+ // Get the current time
+ now := time.Now()
+
+ // If we are past the max TTL, we shouldn't be in this function...but
+ // fast path out if we are
+ if maxValidTime.Before(now) {
+ return nil, fmt.Errorf("past the max TTL, cannot renew")
+ }
+
+ // Basic max safety checks have passed, now let's figure out our
+ // increment. We'll use the user-supplied value first, then the
+ // backend-provided default if possible, or the mount/system default
+ // if not.
+ increment := leaseOpts.Increment
+ if increment <= 0 {
+ if backendIncrement > 0 {
+ increment = backendIncrement
+ } else {
+ increment = systemView.DefaultLeaseTTL()
+ }
+ }
+
+ // We are proposing a time of the current time plus the increment
+ proposedExpiration := now.Add(increment)
+
+ // If the proposed expiration is after the maximum TTL of the lease,
+ // cap the increment to whatever is left
+ if maxValidTime.Before(proposedExpiration) {
+ increment = maxValidTime.Sub(now)
+ }
+
+ // Set the lease
+ leaseOpts.TTL = increment
+
+ return &logical.Response{Auth: req.Auth, Secret: req.Secret}, nil
+ }
+}
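+
+// Illustrative wiring of LeaseExtend as a secret's renew callback (a sketch;
+// the "creds" type name is hypothetical):
+//
+//	secret := &Secret{
+//		Type:  "creds",
+//		Renew: LeaseExtend(0, 0, b.System()),
+//	}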
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/lease_test.go b/vendor/github.com/hashicorp/vault/logical/framework/lease_test.go
new file mode 100644
index 0000000..b45b9c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/lease_test.go
@@ -0,0 +1,114 @@
+package framework
+
+import (
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestLeaseExtend(t *testing.T) {
+
+ testSysView := logical.StaticSystemView{
+ DefaultLeaseTTLVal: 5 * time.Hour,
+ MaxLeaseTTLVal: 30 * time.Hour,
+ }
+
+ now := time.Now().Round(time.Hour)
+
+ cases := map[string]struct {
+ BackendDefault time.Duration
+ BackendMax time.Duration
+ Increment time.Duration
+ Result time.Duration
+ Error bool
+ }{
+ "valid request, good bounds, increment is preferred": {
+ BackendDefault: 30 * time.Hour,
+ Increment: 1 * time.Hour,
+ Result: 1 * time.Hour,
+ },
+
+ "valid request, zero backend default, uses increment": {
+ BackendDefault: 0,
+ Increment: 1 * time.Hour,
+ Result: 1 * time.Hour,
+ },
+
+ "lease increment is zero, uses backend default": {
+ BackendDefault: 30 * time.Hour,
+ Increment: 0,
+ Result: 30 * time.Hour,
+ },
+
+ "lease increment and default are zero, uses systemview": {
+ BackendDefault: 0,
+ Increment: 0,
+ Result: 5 * time.Hour,
+ },
+
+ "backend max and associated request are too long": {
+ BackendDefault: 40 * time.Hour,
+ BackendMax: 45 * time.Hour,
+ Result: 30 * time.Hour,
+ },
+
+ "all request values are larger than the system view, so the system view limits": {
+ BackendDefault: 40 * time.Hour,
+ BackendMax: 50 * time.Hour,
+ Increment: 40 * time.Hour,
+ Result: 30 * time.Hour,
+ },
+
+ "request within backend max": {
+ BackendDefault: 9 * time.Hour,
+ BackendMax: 5 * time.Hour,
+ Increment: 4 * time.Hour,
+ Result: 4 * time.Hour,
+ },
+
+ "request outside backend max": {
+ BackendDefault: 9 * time.Hour,
+ BackendMax: 4 * time.Hour,
+ Increment: 5 * time.Hour,
+ Result: 4 * time.Hour,
+ },
+
+ "request is negative, no backend default, use sysview": {
+ Increment: -7 * time.Hour,
+ Result: 5 * time.Hour,
+ },
+
+ "lease increment too large": {
+ Increment: 40 * time.Hour,
+ Result: 30 * time.Hour,
+ },
+ }
+
+ for name, tc := range cases {
+ req := &logical.Request{
+ Auth: &logical.Auth{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 1 * time.Hour,
+ IssueTime: now,
+ Increment: tc.Increment,
+ },
+ },
+ }
+
+ callback := LeaseExtend(tc.BackendDefault, tc.BackendMax, testSysView)
+ resp, err := callback(req, nil)
+ if (err != nil) != tc.Error {
+ t.Fatalf("bad: %s\nerr: %s", name, err)
+ }
+ if tc.Error {
+ continue
+ }
+
+ // Round it to the nearest hour
+ lease := now.Add(resp.Auth.TTL).Round(time.Hour).Sub(now)
+ if lease != tc.Result {
+ t.Fatalf("bad: %s\nlease: %s", name, lease)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path.go b/vendor/github.com/hashicorp/vault/logical/framework/path.go
new file mode 100644
index 0000000..fa0c037
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/path.go
@@ -0,0 +1,160 @@
+package framework
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+// Helper which returns a generic regex string for creating endpoint patterns
+// that are identified by the given name in the backends
+func GenericNameRegex(name string) string {
+ return fmt.Sprintf("(?P<%s>\\w(([\\w-.]+)?\\w)?)", name)
+}
+
+// Helper which returns a regex string for optionally accepting a field
+// from the API URL
+func OptionalParamRegex(name string) string {
+ return fmt.Sprintf("(/(?P<%s>.+))?", name)
+}
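+
+// Illustrative pattern composition using the two helpers above (a sketch;
+// the "name" and "version" field names are hypothetical):
+//
+//	Pattern: "keys/" + GenericNameRegex("name") + OptionalParamRegex("version")
+//
+// This matches both "keys/foo" and "keys/foo/2", exposing "name" and, when
+// present, "version" as fields.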
+
+// PathAppend is a helper for appending lists of paths into a single
+// list.
+func PathAppend(paths ...[]*Path) []*Path {
+ result := make([]*Path, 0, 10)
+ for _, ps := range paths {
+ result = append(result, ps...)
+ }
+
+ return result
+}
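+
+// e.g. (illustrative, with hypothetical path-list helpers):
+//
+//	b.Paths = PathAppend(rolePaths(b), configPaths(b))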
+
+// Path is a single path that the backend responds to.
+type Path struct {
+ // Pattern is the pattern of the URL that matches this path.
+ //
+ // This should be a valid regular expression. Named captures will be
+ // exposed as fields that should map to a schema in Fields. If a named
+ // capture is not a field in the Fields map, then it will be ignored.
+ Pattern string
+
+ // Fields is the mapping of data fields to a schema describing that
+ // field. Named captures in the Pattern also map to fields. If a named
+ // capture name matches a PUT body name, the named capture takes
+ // priority.
+ //
+ // Note that only named capture fields are available in every operation,
+ // whereas all fields are available in the Write operation.
+ Fields map[string]*FieldSchema
+
+ // Callbacks are the set of callbacks that are called for a given
+ // operation. If a callback for a specific operation is not present,
+ // then logical.ErrUnsupportedOperation is automatically generated.
+ //
+ // The help operation is the only operation that the Path will
+ // automatically handle if the Help field is set. If both the Help
+ // field is set and there is a callback registered here, then the
+ // callback will be called.
+ Callbacks map[logical.Operation]OperationFunc
+
+ // ExistenceCheck, if implemented, is used to query whether a given
+ // resource exists or not. This is used for ACL purposes: if an Update
+ // action is specified, and the existence check returns false, the action
+ // is not allowed since the resource must first be created. The reverse is
+ // also true. If not specified, the Update action is forced and the user
+ // must have UpdateCapability on the path.
+ ExistenceCheck func(*logical.Request, *FieldData) (bool, error)
+
+ // Help is text describing how to use this path. This will be used
+ // to auto-generate the help operation. The Path will automatically
+ // generate a parameter listing and URL structure based on the
+ // regular expression, so the help text should just contain a description
+ // of what happens.
+ //
+ // HelpSynopsis is a one-sentence description of the path. This will
+ // be automatically line-wrapped at 80 characters.
+ //
+ // HelpDescription is a long-form description of the path. This will
+ // be automatically line-wrapped at 80 characters.
+ HelpSynopsis string
+ HelpDescription string
+}
+
+func (p *Path) helpCallback(
+ req *logical.Request, data *FieldData) (*logical.Response, error) {
+ var tplData pathTemplateData
+ tplData.Request = req.Path
+ tplData.RoutePattern = p.Pattern
+ tplData.Synopsis = strings.TrimSpace(p.HelpSynopsis)
+ if tplData.Synopsis == "" {
+ tplData.Synopsis = ""
+ }
+ tplData.Description = strings.TrimSpace(p.HelpDescription)
+ if tplData.Description == "" {
+ tplData.Description = ""
+ }
+
+ // Alphabetize the fields
+ fieldKeys := make([]string, 0, len(p.Fields))
+ for k := range p.Fields {
+ fieldKeys = append(fieldKeys, k)
+ }
+ sort.Strings(fieldKeys)
+
+ // Build the field help
+ tplData.Fields = make([]pathTemplateFieldData, len(fieldKeys))
+ for i, k := range fieldKeys {
+ schema := p.Fields[k]
+ description := strings.TrimSpace(schema.Description)
+ if description == "" {
+ description = ""
+ }
+
+ tplData.Fields[i] = pathTemplateFieldData{
+ Key: k,
+ Type: schema.Type.String(),
+ Description: description,
+ }
+ }
+
+ help, err := executeTemplate(pathHelpTemplate, &tplData)
+ if err != nil {
+ return nil, fmt.Errorf("error executing template: %s", err)
+ }
+
+ return logical.HelpResponse(help, nil), nil
+}
+
+type pathTemplateData struct {
+ Request string
+ RoutePattern string
+ Synopsis string
+ Description string
+ Fields []pathTemplateFieldData
+}
+
+type pathTemplateFieldData struct {
+ Key string
+ Type string
+ Description string
+ URL bool
+}
+
+const pathHelpTemplate = `
+Request: {{.Request}}
+Matching Route: {{.RoutePattern}}
+
+{{.Synopsis}}
+
+{{ if .Fields -}}
+## PARAMETERS
+{{range .Fields}}
+{{indent 4 .Key}} ({{.Type}})
+{{indent 8 .Description}}
+{{end}}{{end}}
+## DESCRIPTION
+
+{{.Description}}
+`
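Taken together, GenericNameRegex, the Fields schema, and the Callbacks map are how a backend declares an endpoint. The following is a hedged sketch of a plausible consumer of this API; the roles/ pattern, the handler body, and the rolePaths name are illustrative assumptions, not part of the vendored source.

    package example

    import (
        "github.com/hashicorp/vault/logical"
        "github.com/hashicorp/vault/logical/framework"
    )

    // rolePaths declares a single endpoint matching "roles/<name>"; the
    // named capture "name" is exposed to callbacks through FieldData.
    func rolePaths() []*framework.Path {
        return []*framework.Path{
            {
                Pattern: "roles/" + framework.GenericNameRegex("name"),
                Fields: map[string]*framework.FieldSchema{
                    "name": {Type: framework.TypeString, Description: "Name of the role"},
                },
                Callbacks: map[logical.Operation]framework.OperationFunc{
                    logical.ReadOperation: func(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
                        // Echo the captured name; a real handler would consult req.Storage.
                        return &logical.Response{Data: map[string]interface{}{
                            "name": d.Get("name").(string),
                        }}, nil
                    },
                },
                HelpSynopsis: "Read a role by name.",
            },
        }
    }
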
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_map.go b/vendor/github.com/hashicorp/vault/logical/framework/path_map.go
new file mode 100644
index 0000000..ff1d277
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/path_map.go
@@ -0,0 +1,179 @@
+package framework
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+)
+
+// PathMap can be used to generate a path that stores mappings in the
+// storage. It is a structure that also exports functions for querying the
+// mappings.
+//
+// The primary use case for this is for credential providers to do their
+// mapping to policies.
+type PathMap struct {
+ Prefix string
+ Name string
+ Schema map[string]*FieldSchema
+ CaseSensitive bool
+ Salt *salt.Salt
+
+ once sync.Once
+}
+
+func (p *PathMap) init() {
+ if p.Prefix == "" {
+ p.Prefix = "map"
+ }
+
+ if p.Schema == nil {
+ p.Schema = map[string]*FieldSchema{
+ "value": &FieldSchema{
+ Type: TypeString,
+ Description: fmt.Sprintf("Value for %s mapping", p.Name),
+ },
+ }
+ }
+}
+
+// pathStruct returns the PathStruct for the given key in this mapping
+func (p *PathMap) pathStruct(k string) *PathStruct {
+ p.once.Do(p.init)
+
+ // If we don't care about casing, store everything lowercase
+ if !p.CaseSensitive {
+ k = strings.ToLower(k)
+ }
+
+ // If we have a salt, apply it before lookup
+ if p.Salt != nil {
+ k = p.Salt.SaltID(k)
+ }
+
+ return &PathStruct{
+ Name: fmt.Sprintf("map/%s/%s", p.Name, k),
+ Schema: p.Schema,
+ }
+}
+
+// Get reads a value out of the mapping
+func (p *PathMap) Get(s logical.Storage, k string) (map[string]interface{}, error) {
+ return p.pathStruct(k).Get(s)
+}
+
+// Put writes a value into the mapping
+func (p *PathMap) Put(s logical.Storage, k string, v map[string]interface{}) error {
+ return p.pathStruct(k).Put(s, v)
+}
+
+// Delete removes a value from the mapping
+func (p *PathMap) Delete(s logical.Storage, k string) error {
+ return p.pathStruct(k).Delete(s)
+}
+
+// List reads the keys under a given path
+func (p *PathMap) List(s logical.Storage, prefix string) ([]string, error) {
+ stripPrefix := fmt.Sprintf("struct/map/%s/", p.Name)
+ fullPrefix := fmt.Sprintf("%s%s", stripPrefix, prefix)
+ out, err := s.List(fullPrefix)
+ if err != nil {
+ return nil, err
+ }
+ stripped := make([]string, len(out))
+ for idx, k := range out {
+ stripped[idx] = strings.TrimPrefix(k, stripPrefix)
+ }
+ return stripped, nil
+}
+
+// Paths are the paths to append to the Backend paths.
+func (p *PathMap) Paths() []*Path {
+ p.once.Do(p.init)
+
+ // Build the schema by simply adding the "key"
+ schema := make(map[string]*FieldSchema)
+ for k, v := range p.Schema {
+ schema[k] = v
+ }
+ schema["key"] = &FieldSchema{
+ Type: TypeString,
+ Description: fmt.Sprintf("Key for the %s mapping", p.Name),
+ }
+
+ return []*Path{
+ &Path{
+ Pattern: fmt.Sprintf("%s/%s/?$", p.Prefix, p.Name),
+
+ Callbacks: map[logical.Operation]OperationFunc{
+ logical.ListOperation: p.pathList,
+ logical.ReadOperation: p.pathList,
+ },
+
+ HelpSynopsis: fmt.Sprintf("Read mappings for %s", p.Name),
+ },
+
+ &Path{
+ Pattern: fmt.Sprintf(`%s/%s/(?P<key>[-\w]+)`, p.Prefix, p.Name),
+
+ Fields: schema,
+
+ Callbacks: map[logical.Operation]OperationFunc{
+ logical.CreateOperation: p.pathSingleWrite,
+ logical.ReadOperation: p.pathSingleRead,
+ logical.UpdateOperation: p.pathSingleWrite,
+ logical.DeleteOperation: p.pathSingleDelete,
+ },
+
+ HelpSynopsis: fmt.Sprintf("Read/write/delete a single %s mapping", p.Name),
+
+ ExistenceCheck: p.pathSingleExistenceCheck,
+ },
+ }
+}
+
+func (p *PathMap) pathList(
+ req *logical.Request, d *FieldData) (*logical.Response, error) {
+ keys, err := p.List(req.Storage, "")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(keys), nil
+}
+
+func (p *PathMap) pathSingleRead(
+ req *logical.Request, d *FieldData) (*logical.Response, error) {
+ v, err := p.Get(req.Storage, d.Get("key").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return &logical.Response{
+ Data: v,
+ }, nil
+}
+
+func (p *PathMap) pathSingleWrite(
+ req *logical.Request, d *FieldData) (*logical.Response, error) {
+ err := p.Put(req.Storage, d.Get("key").(string), d.Raw)
+ return nil, err
+}
+
+func (p *PathMap) pathSingleDelete(
+ req *logical.Request, d *FieldData) (*logical.Response, error) {
+ err := p.Delete(req.Storage, d.Get("key").(string))
+ return nil, err
+}
+
+func (p *PathMap) pathSingleExistenceCheck(
+ req *logical.Request, d *FieldData) (bool, error) {
+ v, err := p.Get(req.Storage, d.Get("key").(string))
+ if err != nil {
+ return false, err
+ }
+ return v != nil, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go b/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go
new file mode 100644
index 0000000..7d30d7d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go
@@ -0,0 +1,257 @@
+package framework
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestPathMap(t *testing.T) {
+ p := &PathMap{Name: "foo"}
+ storage := new(logical.InmemStorage)
+ var b logical.Backend = &Backend{Paths: p.Paths()}
+
+ // Write via HTTP
+ _, err := b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "map/foo/a",
+ Data: map[string]interface{}{
+ "value": "bar",
+ },
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+
+ // Read via HTTP
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "map/foo/a",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if resp.Data["value"] != "bar" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read via API
+ v, err := p.Get(storage, "a")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if v["value"] != "bar" {
+ t.Fatalf("bad: %#v", v)
+ }
+
+ // Read via API with other casing
+ v, err = p.Get(storage, "A")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if v["value"] != "bar" {
+ t.Fatalf("bad: %#v", v)
+ }
+
+ // Verify List
+ keys, err := p.List(storage, "")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if len(keys) != 1 || keys[0] != "a" {
+ t.Fatalf("bad: %#v", keys)
+ }
+
+ // LIST via HTTP
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ListOperation,
+ Path: "map/foo/",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if len(resp.Data) != 1 || len(resp.Data["keys"].([]string)) != 1 ||
+ resp.Data["keys"].([]string)[0] != "a" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Delete via HTTP
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.DeleteOperation,
+ Path: "map/foo/a",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Re-read via HTTP
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "map/foo/a",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if _, ok := resp.Data["value"]; ok {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Re-read via API
+ v, err = p.Get(storage, "a")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if v != nil {
+ t.Fatalf("bad: %#v", v)
+ }
+}
+
+func TestPathMap_getInvalid(t *testing.T) {
+ p := &PathMap{Name: "foo"}
+ storage := new(logical.InmemStorage)
+
+ v, err := p.Get(storage, "nope")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if v != nil {
+ t.Fatalf("bad: %#v", v)
+ }
+}
+
+func TestPathMap_routes(t *testing.T) {
+ p := &PathMap{Name: "foo"}
+ TestBackendRoutes(t, &Backend{Paths: p.Paths()}, []string{
+ "map/foo", // Normal
+ "map/foo/bar", // Normal
+ "map/foo/bar-baz", // Hyphen key
+ })
+}
+
+func TestPathMap_Salted(t *testing.T) {
+ storage := new(logical.InmemStorage)
+ salt, err := salt.NewSalt(storage, &salt.Config{
+ HashFunc: salt.SHA1Hash,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ p := &PathMap{Name: "foo", Salt: salt}
+ var b logical.Backend = &Backend{Paths: p.Paths()}
+
+ // Write via HTTP
+ _, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "map/foo/a",
+ Data: map[string]interface{}{
+ "value": "bar",
+ },
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+
+ // Non-salted version should not be there
+ out, err := storage.Get("struct/map/foo/a")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("non-salted key found")
+ }
+
+ // Ensure the path is salted
+ expect := salt.SaltID("a")
+ out, err = storage.Get("struct/map/foo/" + expect)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("missing salted key")
+ }
+
+ // Read via HTTP
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "map/foo/a",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if resp.Data["value"] != "bar" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read via API
+ v, err := p.Get(storage, "a")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if v["value"] != "bar" {
+ t.Fatalf("bad: %#v", v)
+ }
+
+ // Read via API with other casing
+ v, err = p.Get(storage, "A")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if v["value"] != "bar" {
+ t.Fatalf("bad: %#v", v)
+ }
+
+ // Verify List
+ keys, err := p.List(storage, "")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if len(keys) != 1 || keys[0] != expect {
+ t.Fatalf("bad: %#v", keys)
+ }
+
+ // Delete via HTTP
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.DeleteOperation,
+ Path: "map/foo/a",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Re-read via HTTP
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "map/foo/a",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if _, ok := resp.Data["value"]; ok {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Re-read via API
+ v, err = p.Get(storage, "a")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if v != nil {
+ t.Fatalf("bad: %#v", v)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_struct.go b/vendor/github.com/hashicorp/vault/logical/framework/path_struct.go
new file mode 100644
index 0000000..ae4f8d2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/path_struct.go
@@ -0,0 +1,119 @@
+package framework
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+// PathStruct can be used to generate a path that stores a struct
+// in the storage. This structure is a map[string]interface{} but the
+// types are set according to the schema in this structure.
+type PathStruct struct {
+ Name string
+ Path string
+ Schema map[string]*FieldSchema
+ HelpSynopsis string
+ HelpDescription string
+
+ Read bool
+}
+
+// Get reads the structure.
+func (p *PathStruct) Get(s logical.Storage) (map[string]interface{}, error) {
+ entry, err := s.Get(fmt.Sprintf("struct/%s", p.Name))
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result map[string]interface{}
+ if err := jsonutil.DecodeJSON(entry.Value, &result); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// Put writes the structure.
+func (p *PathStruct) Put(s logical.Storage, v map[string]interface{}) error {
+ bytes, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+
+ return s.Put(&logical.StorageEntry{
+ Key: fmt.Sprintf("struct/%s", p.Name),
+ Value: bytes,
+ })
+}
+
+// Delete removes the structure.
+func (p *PathStruct) Delete(s logical.Storage) error {
+ return s.Delete(fmt.Sprintf("struct/%s", p.Name))
+}
+
+// Paths are the paths to append to the Backend paths.
+func (p *PathStruct) Paths() []*Path {
+ // The single path we support to read/write this config
+ path := &Path{
+ Pattern: p.Path,
+ Fields: p.Schema,
+
+ Callbacks: map[logical.Operation]OperationFunc{
+ logical.CreateOperation: p.pathWrite,
+ logical.UpdateOperation: p.pathWrite,
+ logical.DeleteOperation: p.pathDelete,
+ },
+
+ ExistenceCheck: p.pathExistenceCheck,
+
+ HelpSynopsis: p.HelpSynopsis,
+ HelpDescription: p.HelpDescription,
+ }
+
+ // If we support reads, add that
+ if p.Read {
+ path.Callbacks[logical.ReadOperation] = p.pathRead
+ }
+
+ return []*Path{path}
+}
+
+func (p *PathStruct) pathRead(
+ req *logical.Request, d *FieldData) (*logical.Response, error) {
+ v, err := p.Get(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ return &logical.Response{
+ Data: v,
+ }, nil
+}
+
+func (p *PathStruct) pathWrite(
+ req *logical.Request, d *FieldData) (*logical.Response, error) {
+ err := p.Put(req.Storage, d.Raw)
+ return nil, err
+}
+
+func (p *PathStruct) pathDelete(
+ req *logical.Request, d *FieldData) (*logical.Response, error) {
+ err := p.Delete(req.Storage)
+ return nil, err
+}
+
+func (p *PathStruct) pathExistenceCheck(
+ req *logical.Request, d *FieldData) (bool, error) {
+ v, err := p.Get(req.Storage)
+ if err != nil {
+ return false, err
+ }
+
+ return v != nil, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_struct_test.go b/vendor/github.com/hashicorp/vault/logical/framework/path_struct_test.go
new file mode 100644
index 0000000..48233d3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/path_struct_test.go
@@ -0,0 +1,92 @@
+package framework
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestPathStruct(t *testing.T) {
+ p := &PathStruct{
+ Name: "foo",
+ Path: "bar",
+ Schema: map[string]*FieldSchema{
+ "value": &FieldSchema{Type: TypeString},
+ },
+ Read: true,
+ }
+
+ storage := new(logical.InmemStorage)
+ var b logical.Backend = &Backend{Paths: p.Paths()}
+
+ // Write via HTTP
+ _, err := b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "bar",
+ Data: map[string]interface{}{
+ "value": "baz",
+ },
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+
+ // Read via HTTP
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "bar",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if resp.Data["value"] != "baz" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read via API
+ v, err := p.Get(storage)
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if v["value"] != "baz" {
+ t.Fatalf("bad: %#v", v)
+ }
+
+ // Delete via HTTP
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.DeleteOperation,
+ Path: "bar",
+ Data: nil,
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Re-read via HTTP
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "bar",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if _, ok := resp.Data["value"]; ok {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Re-read via API
+ v, err = p.Get(storage)
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if v != nil {
+ t.Fatalf("bad: %#v", v)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/policy_map.go b/vendor/github.com/hashicorp/vault/logical/framework/policy_map.go
new file mode 100644
index 0000000..fa6b4bc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/policy_map.go
@@ -0,0 +1,64 @@
+package framework
+
+import (
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+// PolicyMap is a specialization of PathMap that expects the values to
+// be lists of policies. This assists in querying and loading policies
+// from the PathMap.
+type PolicyMap struct {
+ PathMap
+
+ DefaultKey string
+ PolicyKey string
+}
+
+func (p *PolicyMap) Policies(s logical.Storage, names ...string) ([]string, error) {
+ policyKey := "value"
+ if p.PolicyKey != "" {
+ policyKey = p.PolicyKey
+ }
+
+ if p.DefaultKey != "" {
+ newNames := make([]string, len(names)+1)
+ newNames[0] = p.DefaultKey
+ copy(newNames[1:], names)
+ names = newNames
+ }
+
+ set := make(map[string]struct{})
+ for _, name := range names {
+ v, err := p.Get(s, name)
+ if err != nil {
+ return nil, err
+ }
+
+ valuesRaw, ok := v[policyKey]
+ if !ok {
+ continue
+ }
+
+ values, ok := valuesRaw.(string)
+ if !ok {
+ continue
+ }
+
+ for _, p := range strings.Split(values, ",") {
+ if p = strings.TrimSpace(p); p != "" {
+ set[p] = struct{}{}
+ }
+ }
+ }
+
+ list := make([]string, 0, len(set))
+ for k := range set {
+ list = append(list, k)
+ }
+ sort.Strings(list)
+
+ return list, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/policy_map_test.go b/vendor/github.com/hashicorp/vault/logical/framework/policy_map_test.go
new file mode 100644
index 0000000..14d8f66
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/policy_map_test.go
@@ -0,0 +1,28 @@
+package framework
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestPolicyMap(t *testing.T) {
+ p := &PolicyMap{}
+ p.PathMap.Name = "foo"
+ s := new(logical.InmemStorage)
+
+ p.Put(s, "foo", map[string]interface{}{"value": "bar"})
+ p.Put(s, "bar", map[string]interface{}{"value": "foo,baz "})
+
+ // Read via API
+ actual, err := p.Policies(s, "foo", "bar")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+
+ expected := []string{"bar", "baz", "foo"}
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/secret.go b/vendor/github.com/hashicorp/vault/logical/framework/secret.go
new file mode 100644
index 0000000..c4f71ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/secret.go
@@ -0,0 +1,90 @@
+package framework
+
+import (
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+// Secret is a type of secret that can be returned from a backend.
+type Secret struct {
+ // Type is the name of this secret type. This is used to set up the
+ // vault ID and to look up the proper secret structure when revocation/
+ // renewal happens. Once this is set, it should not be changed.
+ //
+ // The format of this must match (case insensitive): ^[a-z0-9_]+$
+ Type string
+
+ // Fields is the mapping of data fields and schema that comprise
+ // the structure of this secret.
+ Fields map[string]*FieldSchema
+
+ // DefaultDuration is the default value for the duration of the lease for
+ // this secret. This can be manually overwritten with the result of
+ // Response().
+ //
+ // If these aren't set, Vault core will set a default lease period which
+ // may come from a mount tuning.
+ DefaultDuration time.Duration
+
+ // Renew is the callback called to renew this secret. If Renew is
+ // not specified then renewable is set to false in the secret.
+ // See lease.go for helpers for this value.
+ Renew OperationFunc
+
+ // Revoke is the callback called to revoke this secret. This is required.
+ Revoke OperationFunc
+}
+
+func (s *Secret) Renewable() bool {
+ return s.Renew != nil
+}
+
+func (s *Secret) Response(
+ data, internal map[string]interface{}) *logical.Response {
+ internalData := make(map[string]interface{})
+ for k, v := range internal {
+ internalData[k] = v
+ }
+ internalData["secret_type"] = s.Type
+
+ return &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: s.DefaultDuration,
+ Renewable: s.Renewable(),
+ },
+ InternalData: internalData,
+ },
+
+ Data: data,
+ }
+}
+
+// HandleRenew is the request handler for renewing this secret.
+func (s *Secret) HandleRenew(req *logical.Request) (*logical.Response, error) {
+ if !s.Renewable() {
+ return nil, logical.ErrUnsupportedOperation
+ }
+
+ data := &FieldData{
+ Raw: req.Data,
+ Schema: s.Fields,
+ }
+
+ return s.Renew(req, data)
+}
+
+// HandleRevoke is the request handler for revoking this secret.
+func (s *Secret) HandleRevoke(req *logical.Request) (*logical.Response, error) {
+ data := &FieldData{
+ Raw: req.Data,
+ Schema: s.Fields,
+ }
+
+ if s.Revoke != nil {
+ return s.Revoke(req, data)
+ }
+
+ return nil, logical.ErrUnsupportedOperation
+}
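Since secret_test.go below is empty, here is a hedged sketch of how a backend might use Secret in practice. The "api_key" type name, the field names, and the placeholder values are assumptions for illustration only.

    package example

    import (
        "time"

        "github.com/hashicorp/vault/logical"
        "github.com/hashicorp/vault/logical/framework"
    )

    // apiKeySecret declares a secret type. Renew is left nil, so
    // Renewable() reports false and HandleRenew returns
    // logical.ErrUnsupportedOperation.
    func apiKeySecret() *framework.Secret {
        return &framework.Secret{
            Type:            "api_key",
            DefaultDuration: 1 * time.Hour,
            Revoke: func(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
                // A real backend would delete the upstream credential here,
                // using identifiers stashed in req.Secret.InternalData.
                return nil, nil
            },
        }
    }

    // issue shows how a path handler would mint an instance of the secret.
    func issue() *logical.Response {
        return apiKeySecret().Response(
            map[string]interface{}{"key": "..."},         // data shown to the user
            map[string]interface{}{"upstream_id": "..."}, // internal data for revoke
        )
    }
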
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/secret_test.go b/vendor/github.com/hashicorp/vault/logical/framework/secret_test.go
new file mode 100644
index 0000000..83af475
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/secret_test.go
@@ -0,0 +1 @@
+package framework
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/template.go b/vendor/github.com/hashicorp/vault/logical/framework/template.go
new file mode 100644
index 0000000..5ac82ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/template.go
@@ -0,0 +1,41 @@
+package framework
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strings"
+ "text/template"
+)
+
+func executeTemplate(tpl string, data interface{}) (string, error) {
+ // Define the functions
+ funcs := map[string]interface{}{
+ "indent": funcIndent,
+ }
+
+ // Parse the help template
+ t, err := template.New("root").Funcs(funcs).Parse(tpl)
+ if err != nil {
+ return "", fmt.Errorf("error parsing template: %s", err)
+ }
+
+ // Execute the template and store the output
+ var buf bytes.Buffer
+ if err := t.Execute(&buf, data); err != nil {
+ return "", fmt.Errorf("error executing template: %s", err)
+ }
+
+ return strings.TrimSpace(buf.String()), nil
+}
+
+func funcIndent(count int, text string) string {
+ var buf bytes.Buffer
+ prefix := strings.Repeat(" ", count)
+ scan := bufio.NewScanner(strings.NewReader(text))
+ for scan.Scan() {
+ buf.WriteString(prefix + scan.Text() + "\n")
+ }
+
+ return strings.TrimRight(buf.String(), "\n")
+}
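Both executeTemplate and funcIndent are unexported, so they are reachable only from inside the framework package (for instance from a _test.go file there); pathHelpTemplate above is their main consumer. A small in-package sketch of the indent function's behavior, noting that executeTemplate trims surrounding whitespace so the very first line loses its indent:

    package framework

    import "fmt"

    // Example_indent demonstrates the {{indent N .}} function registered
    // by executeTemplate. Note the TrimSpace on the final output: leading
    // indentation on the first line is stripped.
    func Example_indent() {
        out, err := executeTemplate(`{{indent 4 .}}`, "a\nb")
        if err != nil {
            panic(err)
        }
        fmt.Printf("%q\n", out)
        // Output: "a\n    b"
    }
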
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/testing.go b/vendor/github.com/hashicorp/vault/logical/framework/testing.go
new file mode 100644
index 0000000..a00a324
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/testing.go
@@ -0,0 +1,15 @@
+package framework
+
+import (
+ "testing"
+)
+
+// TestBackendRoutes is a helper to test that all the given routes will
+// route properly in the backend.
+func TestBackendRoutes(t *testing.T, b *Backend, rs []string) {
+ for _, r := range rs {
+ if b.Route(r) == nil {
+ t.Fatalf("bad route: %s", r)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/wal.go b/vendor/github.com/hashicorp/vault/logical/framework/wal.go
new file mode 100644
index 0000000..4e37aec
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/wal.go
@@ -0,0 +1,100 @@
+package framework
+
+import (
+ "encoding/json"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+// WALPrefix is the prefix within Storage where WAL entries will be written.
+const WALPrefix = "wal/"
+
+type WALEntry struct {
+ ID string `json:"-"`
+ Kind string `json:"type"`
+ Data interface{} `json:"data"`
+ CreatedAt int64 `json:"created_at"`
+}
+
+// PutWAL writes some data to the WAL.
+//
+// The kind parameter is used by the framework to allow users to store
+// multiple kinds of WAL data and to easily disambiguate what data they're
+// expecting.
+//
+// Data within the WAL that is uncommitted (DeleteWAL hasn't been called
+// to commit it) will be given to the rollback callback when a rollback
+// operation is received, allowing the backend to clean up partial states.
+//
+// The data must be JSON encodable.
+//
+// This returns a unique ID that can be used to reference this WAL data.
+// WAL data cannot be modified. You can only add to the WAL and commit existing
+// WAL entries.
+func PutWAL(s logical.Storage, kind string, data interface{}) (string, error) {
+ value, err := json.Marshal(&WALEntry{
+ Kind: kind,
+ Data: data,
+ CreatedAt: time.Now().UTC().Unix(),
+ })
+ if err != nil {
+ return "", err
+ }
+
+ id, err := uuid.GenerateUUID()
+ if err != nil {
+ return "", err
+ }
+
+ return id, s.Put(&logical.StorageEntry{
+ Key: WALPrefix + id,
+ Value: value,
+ })
+}
+
+// GetWAL reads a specific entry from the WAL. If the entry doesn't
+// exist, a nil entry and a nil error are returned.
+func GetWAL(s logical.Storage, id string) (*WALEntry, error) {
+ entry, err := s.Get(WALPrefix + id)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var raw WALEntry
+ if err := jsonutil.DecodeJSON(entry.Value, &raw); err != nil {
+ return nil, err
+ }
+ raw.ID = id
+
+ return &raw, nil
+}
+
+// DeleteWAL commits the WAL entry with the given ID. Once committed,
+// it is assumed that the operation was a success and doesn't need to
+// be rolled back.
+func DeleteWAL(s logical.Storage, id string) error {
+ return s.Delete(WALPrefix + id)
+}
+
+// ListWAL lists all the entries in the WAL.
+func ListWAL(s logical.Storage) ([]string, error) {
+ keys, err := s.List(WALPrefix)
+ if err != nil {
+ return nil, err
+ }
+
+ for i, k := range keys {
+ keys[i] = strings.TrimPrefix(k, WALPrefix)
+ }
+
+ return keys, nil
+}
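One nuance worth making explicit: WAL entries round-trip through JSON, so a struct written with PutWAL comes back from GetWAL as a map[string]interface{}, not the original type. A hedged sketch; the "user-create" kind string and the payload type are assumptions:

    package example

    import (
        "fmt"

        "github.com/hashicorp/vault/logical"
        "github.com/hashicorp/vault/logical/framework"
    )

    type walPayload struct {
        User string `json:"user"`
    }

    func demoWAL(s logical.Storage) error {
        // Write a struct under the hypothetical kind "user-create".
        id, err := framework.PutWAL(s, "user-create", &walPayload{User: "alice"})
        if err != nil {
            return err
        }

        // Because entries round-trip through JSON, Data comes back as a
        // map[string]interface{}, not a *walPayload.
        entry, err := framework.GetWAL(s, id)
        if err != nil {
            return err
        }
        data := entry.Data.(map[string]interface{})
        fmt.Println(data["user"]) // alice

        // Committing is deleting: the entry no longer exists to roll back.
        return framework.DeleteWAL(s, id)
    }
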
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/wal_test.go b/vendor/github.com/hashicorp/vault/logical/framework/wal_test.go
new file mode 100644
index 0000000..8ee12dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/framework/wal_test.go
@@ -0,0 +1,60 @@
+package framework
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestWAL(t *testing.T) {
+ s := new(logical.InmemStorage)
+
+ // WAL should be empty to start
+ keys, err := ListWAL(s)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if len(keys) > 0 {
+ t.Fatalf("bad: %#v", keys)
+ }
+
+ // Write an entry to the WAL
+ id, err := PutWAL(s, "foo", "bar")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // The key should be in the WAL
+ keys, err = ListWAL(s)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if !reflect.DeepEqual(keys, []string{id}) {
+ t.Fatalf("bad: %#v", keys)
+ }
+
+ // Should be able to get the value
+ entry, err := GetWAL(s, id)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if entry.Kind != "foo" {
+ t.Fatalf("bad: %#v", entry)
+ }
+ if entry.Data != "bar" {
+ t.Fatalf("bad: %#v", entry)
+ }
+
+ // Should be able to delete the value
+ if err := DeleteWAL(s, id); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ entry, err = GetWAL(s, id)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if entry != nil {
+ t.Fatalf("bad: %#v", entry)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/lease.go b/vendor/github.com/hashicorp/vault/logical/lease.go
new file mode 100644
index 0000000..ed0b26b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/lease.go
@@ -0,0 +1,48 @@
+package logical
+
+import "time"
+
+// LeaseOptions is an embeddable struct to capture common lease
+// settings between a Secret and Auth
+type LeaseOptions struct {
+ // TTL is the duration that this secret is valid for. Vault
+ // will automatically revoke it after the duration.
+ TTL time.Duration `json:"lease"`
+
+ // Renewable, if true, means that this secret can be renewed.
+ Renewable bool `json:"renewable"`
+
+ // Increment will be the lease increment that the user requested.
+ // This is only available on a Renew operation and has no effect
+ // when returning a response.
+ Increment time.Duration `json:"-"`
+
+ // IssueTime is the time of issue for the original lease. This is
+ // only available on a Renew operation and has no effect when returning
+ // a response. It can be used to enforce maximum lease periods by
+ // a logical backend.
+ IssueTime time.Time `json:"-"`
+}
+
+// LeaseEnabled checks if leasing is enabled
+func (l *LeaseOptions) LeaseEnabled() bool {
+ return l.TTL > 0
+}
+
+// LeaseTotal is the lease duration with a guard against a negative TTL
+func (l *LeaseOptions) LeaseTotal() time.Duration {
+ if l.TTL <= 0 {
+ return 0
+ }
+
+ return l.TTL
+}
+
+// ExpirationTime computes the time at which the lease expires
+func (l *LeaseOptions) ExpirationTime() time.Time {
+ var expireTime time.Time
+ if l.LeaseEnabled() {
+ expireTime = time.Now().Add(l.LeaseTotal())
+ }
+ return expireTime
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/lease_test.go b/vendor/github.com/hashicorp/vault/logical/lease_test.go
new file mode 100644
index 0000000..050b7db
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/lease_test.go
@@ -0,0 +1,56 @@
+package logical
+
+import (
+ "testing"
+ "time"
+)
+
+func TestLeaseOptionsLeaseTotal(t *testing.T) {
+ var l LeaseOptions
+ l.TTL = 1 * time.Hour
+
+ actual := l.LeaseTotal()
+ expected := l.TTL
+ if actual != expected {
+ t.Fatalf("bad: %s", actual)
+ }
+}
+
+func TestLeaseOptionsLeaseTotal_grace(t *testing.T) {
+ var l LeaseOptions
+ l.TTL = 1 * time.Hour
+
+ actual := l.LeaseTotal()
+ if actual != l.TTL {
+ t.Fatalf("bad: %s", actual)
+ }
+}
+
+func TestLeaseOptionsLeaseTotal_negLease(t *testing.T) {
+ var l LeaseOptions
+ l.TTL = -1 * 1 * time.Hour
+
+ actual := l.LeaseTotal()
+ expected := time.Duration(0)
+ if actual != expected {
+ t.Fatalf("bad: %s", actual)
+ }
+}
+
+func TestLeaseOptionsExpirationTime(t *testing.T) {
+ var l LeaseOptions
+ l.TTL = 1 * time.Hour
+
+ limit := time.Now().Add(time.Hour)
+ exp := l.ExpirationTime()
+ if exp.Before(limit) {
+ t.Fatalf("bad: %s", exp)
+ }
+}
+
+func TestLeaseOptionsExpirationTime_noLease(t *testing.T) {
+ var l LeaseOptions
+ if !l.ExpirationTime().IsZero() {
+ t.Fatal("should be zero")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/logical.go b/vendor/github.com/hashicorp/vault/logical/logical.go
new file mode 100644
index 0000000..3b66fba
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/logical.go
@@ -0,0 +1,82 @@
+package logical
+
+import log "github.com/mgutz/logxi/v1"
+
+// Backend interface must be implemented to be "mountable" at
+// a given path. Requests flow through a router which has various mount
+// points that flow to a logical backend. The logic of each backend is flexible,
+// and this is what allows materialized keys to function. There can be specialized
+// logical backends for various upstreams (Consul, PostgreSQL, MySQL, etc) that can
+// interact with remote APIs to generate keys dynamically. This interface also
+// allows for a "procfs" like interaction, as internal state can be exposed by
+// acting like a logical backend and being mounted.
+type Backend interface {
+ // HandleRequest is used to handle a request and generate a response.
+ // The backends must check the operation type and handle appropriately.
+ HandleRequest(*Request) (*Response, error)
+
+ // SpecialPaths is a list of paths that are special in some way.
+ // See PathType for the types of special paths. The key is the type
+ // of the special path, and the value is a list of paths for this type.
+ // This is not a regular expression but is an exact match. If the path
+ // ends in '*' then it is a prefix-based match. The '*' can only appear
+ // at the end.
+ SpecialPaths() *Paths
+
+ // System provides an interface to access certain system configuration
+ // information, such as globally configured default and max lease TTLs.
+ System() SystemView
+
+ // HandleExistenceCheck is used to handle a request and generate a response
+ // indicating whether the given path exists or not; this is used to
+ // understand whether the request must have a Create or Update capability
+ // ACL applied. The first bool indicates whether an existence check
+ // function was found for the backend; the second indicates whether, if an
+ // existence check function was found, the item exists or not.
+ HandleExistenceCheck(*Request) (bool, bool, error)
+
+ // Cleanup is invoked during an unmount of a backend to allow it to
+ // handle any cleanup like connection closing or releasing of file handles.
+ Cleanup()
+
+ // Initialize is invoked after a backend is created. It is the place to run
+ // any operations requiring storage; these should not be in the factory.
+ Initialize() error
+
+ // InvalidateKey may be invoked when an object is modified that belongs
+ // to the backend. The backend can use this to clear any caches or reset
+ // internal state as needed.
+ InvalidateKey(key string)
+}
+
+// BackendConfig is provided to the factory to initialize the backend
+type BackendConfig struct {
+ // View should not be stored, and should only be used for initialization
+ StorageView Storage
+
+ // The backend should use this logger. The log should not contain any secrets.
+ Logger log.Logger
+
+ // System provides a view into a subset of safe system information that
+ // is useful for backends, such as the default/max lease TTLs
+ System SystemView
+
+ // Config is the opaque user configuration provided when mounting
+ Config map[string]string
+}
+
+// Factory is the factory function to create a logical backend.
+type Factory func(*BackendConfig) (Backend, error)
+
+// Paths is the structure of special paths that is used for SpecialPaths.
+type Paths struct {
+ // Root are the paths that require a root token to access
+ Root []string
+
+ // Unauthenticated are the paths that can be accessed without any auth.
+ Unauthenticated []string
+
+ // LocalStorage are paths (prefixes) that are local to this instance; this
+ // indicates that these paths should not be replicated
+ LocalStorage []string
+}
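BackendConfig and Factory describe the mount-time handshake: core builds the config (storage view, logger, system view, user config) and hands it to the factory. A minimal sketch, assuming the framework.Backend used in the tests earlier in this diff and the hypothetical rolePaths helper from the Path example above; a real factory would also honor conf.Config and the system view.

    package example

    import (
        "github.com/hashicorp/vault/logical"
        "github.com/hashicorp/vault/logical/framework"
    )

    // Factory satisfies logical.Factory. Vault core supplies the
    // BackendConfig at mount time; this sketch ignores conf and only
    // wires up paths.
    func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
        return &framework.Backend{
            Paths: rolePaths(), // hypothetical helper from the Path example
        }, nil
    }
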
diff --git a/vendor/github.com/hashicorp/vault/logical/request.go b/vendor/github.com/hashicorp/vault/logical/request.go
new file mode 100644
index 0000000..c41b1dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/request.go
@@ -0,0 +1,204 @@
+package logical
+
+import (
+ "errors"
+ "fmt"
+ "time"
+)
+
+// RequestWrapInfo is a struct that stores information about desired response
+// wrapping behavior
+type RequestWrapInfo struct {
+ // Setting to non-zero specifies that the response should be wrapped.
+ // Specifies the desired TTL of the wrapping token.
+ TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
+
+ // The format to use for the wrapped response; if not specified it's a bare
+ // token
+ Format string `json:"format" structs:"format" mapstructure:"format"`
+}
+
+// Request is a struct that stores the parameters and context
+// of a request being made to Vault. It is used to abstract
+// the details of the higher level request protocol from the handlers.
+type Request struct {
+ // ID is the UUID associated with each request
+ ID string `json:"id" structs:"id" mapstructure:"id"`
+
+ // If set, the name given to the replication secondary where this request
+ // originated
+ ReplicationCluster string `json:"replication_cluster" structs:"replication_cluster" mapstructure:"replication_cluster"`
+
+ // Operation is the requested operation type
+ Operation Operation `json:"operation" structs:"operation" mapstructure:"operation"`
+
+ // Path is the part of the request path not consumed by the
+ // routing. As an example, if the original request path is "prod/aws/foo"
+ // and the AWS logical backend is mounted at "prod/aws/", then the
+ // final path is "foo" since the mount prefix is trimmed.
+ Path string `json:"path" structs:"path" mapstructure:"path"`
+
+ // Request data is an opaque map that must have string keys.
+ Data map[string]interface{} `json:"map" structs:"data" mapstructure:"data"`
+
+ // Storage can be used to durably store and retrieve state.
+ Storage Storage `json:"-"`
+
+ // Secret will be non-nil only for Revoke and Renew operations
+ // to represent the secret that was returned prior.
+ Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret"`
+
+ // Auth will be non-nil only for Renew operations
+ // to represent the auth that was returned prior.
+ Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth"`
+
+ // Headers will contain the http headers from the request. This value will
+ // be used in the audit broker to ensure we are auditing only the allowed
+ // headers.
+ Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers"`
+
+ // Connection will be non-nil only for credential providers to
+ // inspect the connection information and potentially use it for
+ // authentication/protection.
+ Connection *Connection `json:"connection" structs:"connection" mapstructure:"connection"`
+
+ // ClientToken is provided to the core so that the identity
+ // can be verified and ACLs applied. This value is passed
+ // through to the logical backends but after being salted and
+ // hashed.
+ ClientToken string `json:"client_token" structs:"client_token" mapstructure:"client_token"`
+
+ // ClientTokenAccessor is provided to the core so that it can be
+ // logged as part of request audit logging.
+ ClientTokenAccessor string `json:"client_token_accessor" structs:"client_token_accessor" mapstructure:"client_token_accessor"`
+
+ // DisplayName is provided to the logical backend to help associate
+ // dynamic secrets with the source entity. This is not a sensitive
+ // name, but is useful for operators.
+ DisplayName string `json:"display_name" structs:"display_name" mapstructure:"display_name"`
+
+ // MountPoint is provided so that a logical backend can generate
+ // paths relative to itself. The `Path` is effectively the client
+ // request path with the MountPoint trimmed off.
+ MountPoint string `json:"mount_point" structs:"mount_point" mapstructure:"mount_point"`
+
+ // MountType is provided so that a logical backend can make decisions
+ // based on the specific mount type (e.g., if a mount type has different
+ // aliases, generating different defaults depending on the alias)
+ MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type"`
+
+ // WrapInfo contains requested response wrapping parameters
+ WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"`
+
+ // ClientTokenRemainingUses represents the allowed number of uses left on the
+ // token supplied
+ ClientTokenRemainingUses int `json:"client_token_remaining_uses" structs:"client_token_remaining_uses" mapstructure:"client_token_remaining_uses"`
+
+ // For replication, contains the last WAL on the remote side after handling
+ // the request, used for best-effort avoidance of stale read-after-write
+ lastRemoteWAL uint64
+}
+
+// Get returns a data field and guards for nil Data
+func (r *Request) Get(key string) interface{} {
+ if r.Data == nil {
+ return nil
+ }
+ return r.Data[key]
+}
+
+// GetString returns a data field as a string
+func (r *Request) GetString(key string) string {
+ raw := r.Get(key)
+ s, _ := raw.(string)
+ return s
+}
+
+func (r *Request) GoString() string {
+ return fmt.Sprintf("*%#v", *r)
+}
+
+func (r *Request) LastRemoteWAL() uint64 {
+ return r.lastRemoteWAL
+}
+
+func (r *Request) SetLastRemoteWAL(last uint64) {
+ r.lastRemoteWAL = last
+}
+
+// RenewRequest creates the structure of the renew request.
+func RenewRequest(
+ path string, secret *Secret, data map[string]interface{}) *Request {
+ return &Request{
+ Operation: RenewOperation,
+ Path: path,
+ Data: data,
+ Secret: secret,
+ }
+}
+
+// RenewAuthRequest creates the structure of the renew request for an auth.
+func RenewAuthRequest(
+ path string, auth *Auth, data map[string]interface{}) *Request {
+ return &Request{
+ Operation: RenewOperation,
+ Path: path,
+ Data: data,
+ Auth: auth,
+ }
+}
+
+// RevokeRequest creates the structure of the revoke request.
+func RevokeRequest(
+ path string, secret *Secret, data map[string]interface{}) *Request {
+ return &Request{
+ Operation: RevokeOperation,
+ Path: path,
+ Data: data,
+ Secret: secret,
+ }
+}
+
+// RollbackRequest creates the structure of the revoke request.
+func RollbackRequest(path string) *Request {
+ return &Request{
+ Operation: RollbackOperation,
+ Path: path,
+ Data: make(map[string]interface{}),
+ }
+}
+
+// Operation is an enum that is used to specify the type
+// of request being made
+type Operation string
+
+const (
+ // The operations below are called per path
+ CreateOperation Operation = "create"
+ ReadOperation = "read"
+ UpdateOperation = "update"
+ DeleteOperation = "delete"
+ ListOperation = "list"
+ HelpOperation = "help"
+
+ // The operations below are called globally, the path is less relevant.
+ RevokeOperation Operation = "revoke"
+ RenewOperation = "renew"
+ RollbackOperation = "rollback"
+)
+
+var (
+ // ErrUnsupportedOperation is returned if the operation is not supported
+ // by the logical backend.
+ ErrUnsupportedOperation = errors.New("unsupported operation")
+
+ // ErrUnsupportedPath is returned if the path is not supported
+ // by the logical backend.
+ ErrUnsupportedPath = errors.New("unsupported path")
+
+ // ErrInvalidRequest is returned if the request is invalid
+ ErrInvalidRequest = errors.New("invalid request")
+
+ // ErrPermissionDenied is returned if the client is not authorized
+ ErrPermissionDenied = errors.New("permission denied")
+)
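A short sketch of constructing a request by hand and using the nil-safe accessors; the path and field names are illustrative:

    package example

    import (
        "fmt"

        "github.com/hashicorp/vault/logical"
    )

    func demoRequest() {
        req := &logical.Request{
            Operation: logical.UpdateOperation,
            Path:      "roles/web",
            Data:      map[string]interface{}{"policy": "readonly"},
        }

        // GetString tolerates nil Data and non-string values, returning "".
        fmt.Println(req.GetString("policy")) // readonly
        fmt.Println(req.GetString("absent")) // ""
    }
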
diff --git a/vendor/github.com/hashicorp/vault/logical/response.go b/vendor/github.com/hashicorp/vault/logical/response.go
new file mode 100644
index 0000000..ee6bfe1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/response.go
@@ -0,0 +1,202 @@
+package logical
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/mitchellh/copystructure"
+)
+
+const (
+ // HTTPContentType can be specified in the Data field of a Response
+ // so that the HTTP front end can specify a custom Content-Type associated
+ // with the HTTPRawBody. This can only be used for non-secrets, and should
+ // be avoided unless absolutely necessary, such as implementing a specification.
+ // The value must be a string.
+ HTTPContentType = "http_content_type"
+
+ // HTTPRawBody is the raw content of the HTTP body that goes with the HTTPContentType.
+ // This can only be specified for non-secrets, and should be similarly
+ // avoided like the HTTPContentType. The value must be a byte slice.
+ HTTPRawBody = "http_raw_body"
+
+ // HTTPStatusCode is the response code of the HTTP body that goes with the HTTPContentType.
+ // This can only be specified for non-secrets, and should be similarly
+ // avoided like the HTTPContentType. The value must be an integer.
+ HTTPStatusCode = "http_status_code"
+)
+
+type ResponseWrapInfo struct {
+ // Setting to non-zero specifies that the response should be wrapped.
+ // Specifies the desired TTL of the wrapping token.
+ TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
+
+ // The token containing the wrapped response
+ Token string `json:"token" structs:"token" mapstructure:"token"`
+
+ // The creation time. This can be used with the TTL to figure out an
+ // expected expiration.
+ CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"`
+
+ // If the contained response is the output of a token creation call, the
+ // created token's accessor will be accessible here
+ WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor"`
+
+ // The format to use. This doesn't get returned, it's only internal.
+ Format string `json:"format" structs:"format" mapstructure:"format"`
+}
+
+// Response is a struct that stores the response of a request.
+// It is used to abstract the details of the higher level request protocol.
+type Response struct {
+ // Secret, if not nil, denotes that this response represents a secret.
+ Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret"`
+
+ // Auth, if not nil, contains the authentication information for
+ // this response. This is only checked and means something for
+ // credential backends.
+ Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth"`
+
+ // Response data is an opaque map that must have string keys. For
+ // secrets, this data is sent down to the user as-is. To store internal
+ // data that you don't want the user to see, store it in
+ // Secret.InternalData.
+ Data map[string]interface{} `json:"data" structs:"data" mapstructure:"data"`
+
+ // Redirect is an HTTP URL to redirect to for further authentication.
+ // This is only valid for credential backends. This will be blanked
+ // for any logical backend and ignored.
+ Redirect string `json:"redirect" structs:"redirect" mapstructure:"redirect"`
+
+ // Warnings allow operations or backends to return warnings in response
+ // to user actions without failing the action outright.
+ // Making it private helps ensure that it is easy for various parts of
+ // Vault (backend, core, etc.) to add warnings without accidentally
+ // replacing what exists.
+ warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"`
+
+ // Information for wrapping the response in a cubbyhole
+ WrapInfo *ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"`
+}
+
+func init() {
+ copystructure.Copiers[reflect.TypeOf(Response{})] = func(v interface{}) (interface{}, error) {
+ input := v.(Response)
+ ret := Response{
+ Redirect: input.Redirect,
+ }
+
+ if input.Secret != nil {
+ retSec, err := copystructure.Copy(input.Secret)
+ if err != nil {
+ return nil, fmt.Errorf("error copying Secret: %v", err)
+ }
+ ret.Secret = retSec.(*Secret)
+ }
+
+ if input.Auth != nil {
+ retAuth, err := copystructure.Copy(input.Auth)
+ if err != nil {
+ return nil, fmt.Errorf("error copying Auth: %v", err)
+ }
+ ret.Auth = retAuth.(*Auth)
+ }
+
+ if input.Data != nil {
+ retData, err := copystructure.Copy(&input.Data)
+ if err != nil {
+ return nil, fmt.Errorf("error copying Data: %v", err)
+ }
+ ret.Data = *(retData.(*map[string]interface{}))
+ }
+
+ if input.Warnings() != nil {
+ for _, warning := range input.Warnings() {
+ ret.AddWarning(warning)
+ }
+ }
+
+ if input.WrapInfo != nil {
+ retWrapInfo, err := copystructure.Copy(input.WrapInfo)
+ if err != nil {
+ return nil, fmt.Errorf("error copying WrapInfo: %v", err)
+ }
+ ret.WrapInfo = retWrapInfo.(*ResponseWrapInfo)
+ }
+
+ return &ret, nil
+ }
+}
+
+// AddWarning adds a warning into the response's warning list
+func (r *Response) AddWarning(warning string) {
+ if r.warnings == nil {
+ r.warnings = make([]string, 0, 1)
+ }
+ r.warnings = append(r.warnings, warning)
+}
+
+// Warnings returns the list of warnings set on the response
+func (r *Response) Warnings() []string {
+ return r.warnings
+}
+
+// ClearWarnings clears the response's warning list
+func (r *Response) ClearWarnings() {
+ r.warnings = make([]string, 0, 1)
+}
+
+// Copies the warnings from the other response to this one
+func (r *Response) CloneWarnings(other *Response) {
+ r.warnings = other.warnings
+}
+
+// IsError returns true if this response seems to indicate an error.
+func (r *Response) IsError() bool {
+ return r != nil && r.Data != nil && len(r.Data) == 1 && r.Data["error"] != nil
+}
+
+func (r *Response) Error() error {
+ if !r.IsError() {
+ return nil
+ }
+ switch r.Data["error"].(type) {
+ case string:
+ return errors.New(r.Data["error"].(string))
+ case error:
+ return r.Data["error"].(error)
+ }
+ return nil
+}
+
+// HelpResponse is used to format a help response
+func HelpResponse(text string, seeAlso []string) *Response {
+ return &Response{
+ Data: map[string]interface{}{
+ "help": text,
+ "see_also": seeAlso,
+ },
+ }
+}
+
+// ErrorResponse is used to format an error response
+func ErrorResponse(text string) *Response {
+ return &Response{
+ Data: map[string]interface{}{
+ "error": text,
+ },
+ }
+}
+
+// ListResponse is used to format a response to a list operation.
+func ListResponse(keys []string) *Response {
+ resp := &Response{
+ Data: map[string]interface{}{},
+ }
+ if len(keys) != 0 {
+ resp.Data["keys"] = keys
+ }
+ return resp
+}
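A sketch of the response constructors and the error-detection convention (a Data map holding exactly one non-nil "error" entry); values are illustrative:

    package example

    import (
        "fmt"

        "github.com/hashicorp/vault/logical"
    )

    func demoResponses() {
        // ListResponse stores keys under "keys" and omits the field for an
        // empty list, which RespondErrorCommon later maps to a 404.
        list := logical.ListResponse([]string{"a", "b"})
        fmt.Println(list.Data["keys"]) // [a b]

        // A response is an "error response" exactly when Data holds a
        // single non-nil "error" entry.
        errResp := logical.ErrorResponse("role not found")
        fmt.Println(errResp.IsError()) // true
        fmt.Println(errResp.Error())   // role not found
    }
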
diff --git a/vendor/github.com/hashicorp/vault/logical/response_util.go b/vendor/github.com/hashicorp/vault/logical/response_util.go
new file mode 100644
index 0000000..a3fd2bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/response_util.go
@@ -0,0 +1,111 @@
+package logical
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/hashicorp/errwrap"
+ multierror "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/helper/consts"
+)
+
+// RespondErrorCommon pulls most of the functionality from http's
+// respondErrorCommon and some of http's handleLogical and makes it available
+// to both the http package and elsewhere.
+func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) {
+ if err == nil && (resp == nil || !resp.IsError()) {
+ switch {
+ case req.Operation == ReadOperation:
+ if resp == nil {
+ return http.StatusNotFound, nil
+ }
+
+ // Basically: if we have empty "keys" or no keys at all, 404. This
+ // provides consistency with GET.
+ case req.Operation == ListOperation && (resp == nil || resp.WrapInfo == nil):
+ if resp == nil || len(resp.Data) == 0 {
+ return http.StatusNotFound, nil
+ }
+ keysRaw, ok := resp.Data["keys"]
+ if !ok || keysRaw == nil {
+ return http.StatusNotFound, nil
+ }
+ keys, ok := keysRaw.([]string)
+ if !ok {
+ return http.StatusInternalServerError, nil
+ }
+ if len(keys) == 0 {
+ return http.StatusNotFound, nil
+ }
+ }
+
+ return 0, nil
+ }
+
+ if errwrap.ContainsType(err, new(ReplicationCodedError)) {
+ var allErrors error
+ codedErr := errwrap.GetType(err, new(ReplicationCodedError)).(*ReplicationCodedError)
+ errwrap.Walk(err, func(inErr error) {
+ // Collect every wrapped error that is not the coded error itself.
+ if _, ok := inErr.(*ReplicationCodedError); !ok {
+ allErrors = multierror.Append(allErrors, inErr)
+ }
+ })
+ if allErrors != nil {
+ return codedErr.Code, multierror.Append(fmt.Errorf("errors from both primary and secondary; primary error was %v; secondary errors follow", codedErr.Msg), allErrors)
+ }
+ return codedErr.Code, errors.New(codedErr.Msg)
+ }
+
+ // Start out with internal server error since in most of these cases there
+ // won't be a response so this won't be overridden
+ statusCode := http.StatusInternalServerError
+ // If we actually have a response, start out with bad request
+ if resp != nil {
+ statusCode = http.StatusBadRequest
+ }
+
+ // Now, check the error itself; if it has a specific logical error, set the
+ // appropriate code
+ if err != nil {
+ switch {
+ case errwrap.ContainsType(err, new(StatusBadRequest)):
+ statusCode = http.StatusBadRequest
+ case errwrap.Contains(err, ErrPermissionDenied.Error()):
+ statusCode = http.StatusForbidden
+ case errwrap.Contains(err, ErrUnsupportedOperation.Error()):
+ statusCode = http.StatusMethodNotAllowed
+ case errwrap.Contains(err, ErrUnsupportedPath.Error()):
+ statusCode = http.StatusNotFound
+ case errwrap.Contains(err, ErrInvalidRequest.Error()):
+ statusCode = http.StatusBadRequest
+ }
+ }
+
+ if resp != nil && resp.IsError() {
+ err = fmt.Errorf("%s", resp.Data["error"].(string))
+ }
+
+ return statusCode, err
+}
+
+// AdjustErrorStatusCode adjusts the status that will be sent in error
+// conditions in a way that can be shared across http's respondError and other
+// locations.
+func AdjustErrorStatusCode(status *int, err error) {
+ // Adjust status code when sealed
+ if errwrap.Contains(err, consts.ErrSealed.Error()) {
+ *status = http.StatusServiceUnavailable
+ }
+
+ // Adjust status code when the request body is too large
+ if errwrap.Contains(err, "http: request body too large") {
+ *status = http.StatusRequestEntityTooLarge
+ }
+
+ // Allow HTTPCoded error passthrough to specify a code
+ if t, ok := err.(HTTPCodedError); ok {
+ *status = t.Code()
+ }
+}
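A sketch of how RespondErrorCommon translates a logical error into an HTTP status; the request path is illustrative:

    package example

    import (
        "fmt"

        "github.com/hashicorp/vault/logical"
    )

    func demoStatus() {
        req := &logical.Request{Operation: logical.ReadOperation, Path: "nope"}

        // ErrUnsupportedPath is matched by message via errwrap and
        // surfaces as 404; a nil response with a nil error on a read
        // would instead short-circuit to 404 with no error.
        code, err := logical.RespondErrorCommon(req, nil, logical.ErrUnsupportedPath)
        fmt.Println(code, err) // 404 unsupported path
    }
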
diff --git a/vendor/github.com/hashicorp/vault/logical/secret.go b/vendor/github.com/hashicorp/vault/logical/secret.go
new file mode 100644
index 0000000..27ad8d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/secret.go
@@ -0,0 +1,30 @@
+package logical
+
+import "fmt"
+
+// Secret represents the secret part of a response.
+type Secret struct {
+ LeaseOptions
+
+ // InternalData is JSON-encodable data that is stored with the secret.
+ // This will be sent back during a Renew/Revoke for storing internal data
+ // used for those operations.
+ InternalData map[string]interface{} `json:"internal_data"`
+
+ // LeaseID is the ID returned to the user to manage this secret.
+ // This is generated by Vault core. Any set value will be ignored.
+ // For requests, this will always be blank.
+ LeaseID string
+}
+
+func (s *Secret) Validate() error {
+ if s.TTL < 0 {
+ return fmt.Errorf("ttl duration must not be less than zero")
+ }
+
+ return nil
+}
+
+func (s *Secret) GoString() string {
+ return fmt.Sprintf("*%#v", *s)
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/storage.go b/vendor/github.com/hashicorp/vault/logical/storage.go
new file mode 100644
index 0000000..51487de
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/storage.go
@@ -0,0 +1,110 @@
+package logical
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+)
+
+// ErrReadOnly is returned when a backend does not support
+// writing. This can be caused by a read-only replica or secondary
+// cluster operation.
+var ErrReadOnly = errors.New("Cannot write to readonly storage")
+
+// Storage is the way that logical backends are able to read/write data.
+type Storage interface {
+ List(prefix string) ([]string, error)
+ Get(string) (*StorageEntry, error)
+ Put(*StorageEntry) error
+ Delete(string) error
+}
+
+// StorageEntry is the entry for an item in a Storage implementation.
+type StorageEntry struct {
+ Key string
+ Value []byte
+}
+
+// DecodeJSON decodes the 'Value' present in StorageEntry.
+func (e *StorageEntry) DecodeJSON(out interface{}) error {
+ return jsonutil.DecodeJSON(e.Value, out)
+}
+
+// StorageEntryJSON creates a StorageEntry with a JSON-encoded value.
+func StorageEntryJSON(k string, v interface{}) (*StorageEntry, error) {
+ encodedBytes, err := jsonutil.EncodeJSON(v)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode storage entry: %v", err)
+ }
+
+ return &StorageEntry{
+ Key: k,
+ Value: encodedBytes,
+ }, nil
+}
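+
+// roundTripJSON is an illustrative sketch (not part of upstream Vault)
+// showing how StorageEntryJSON and DecodeJSON pair up: a value is encoded
+// into an entry, persisted, and decoded back on the way out.
+func roundTripJSON(s Storage, key string, in, out interface{}) error {
+	entry, err := StorageEntryJSON(key, in)
+	if err != nil {
+		return err
+	}
+	if err := s.Put(entry); err != nil {
+		return err
+	}
+	read, err := s.Get(key)
+	if err != nil {
+		return err
+	}
+	if read == nil {
+		return fmt.Errorf("entry %q not found after put", key)
+	}
+	return read.DecodeJSON(out)
+}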
+
+type ClearableView interface {
+ List(string) ([]string, error)
+ Delete(string) error
+}
+
+// ScanView is used to scan all the keys in a view iteratively
+func ScanView(view ClearableView, cb func(path string)) error {
+ frontier := []string{""}
+ for len(frontier) > 0 {
+ n := len(frontier)
+ current := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ // List the contents
+ contents, err := view.List(current)
+ if err != nil {
+ return fmt.Errorf("list failed at path '%s': %v", current, err)
+ }
+
+ // Handle the contents in the directory
+ for _, c := range contents {
+ fullPath := current + c
+ if strings.HasSuffix(c, "/") {
+ frontier = append(frontier, fullPath)
+ } else {
+ cb(fullPath)
+ }
+ }
+ }
+ return nil
+}
+
+// CollectKeys is used to collect all the keys in a view
+func CollectKeys(view ClearableView) ([]string, error) {
+ // Accumulate the keys
+ var existing []string
+ cb := func(path string) {
+ existing = append(existing, path)
+ }
+
+ // Scan for all the keys
+ if err := ScanView(view, cb); err != nil {
+ return nil, err
+ }
+ return existing, nil
+}
+
+// ClearView is used to delete all the keys in a view
+func ClearView(view ClearableView) error {
+ // Collect all the keys
+ keys, err := CollectKeys(view)
+ if err != nil {
+ return err
+ }
+
+ // Delete all the keys
+ for _, key := range keys {
+ if err := view.Delete(key); err != nil {
+ return err
+ }
+ }
+ return nil
+}
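+
+// wipeAndVerify is an illustrative sketch (not part of upstream Vault):
+// ClearView is built on CollectKeys, which in turn walks the tree via
+// ScanView, so clearing a view and double-checking it is empty composes
+// naturally.
+func wipeAndVerify(view ClearableView) error {
+	if err := ClearView(view); err != nil {
+		return err
+	}
+	keys, err := CollectKeys(view)
+	if err != nil {
+		return err
+	}
+	if len(keys) != 0 {
+		return fmt.Errorf("view still has %d keys after clear", len(keys))
+	}
+	return nil
+}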
diff --git a/vendor/github.com/hashicorp/vault/logical/storage_inmem.go b/vendor/github.com/hashicorp/vault/logical/storage_inmem.go
new file mode 100644
index 0000000..64c6e2b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/storage_inmem.go
@@ -0,0 +1,53 @@
+package logical
+
+import (
+ "sync"
+
+ "github.com/hashicorp/vault/physical"
+)
+
+// InmemStorage implements Storage and stores all data in memory.
+type InmemStorage struct {
+ phys *physical.InmemBackend
+
+ once sync.Once
+}
+
+func (s *InmemStorage) List(prefix string) ([]string, error) {
+ s.once.Do(s.init)
+
+ return s.phys.List(prefix)
+}
+
+func (s *InmemStorage) Get(key string) (*StorageEntry, error) {
+ s.once.Do(s.init)
+ entry, err := s.phys.Get(key)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+ return &StorageEntry{
+ Key: entry.Key,
+ Value: entry.Value,
+ }, nil
+}
+
+func (s *InmemStorage) Put(entry *StorageEntry) error {
+ s.once.Do(s.init)
+ physEntry := &physical.Entry{
+ Key: entry.Key,
+ Value: entry.Value,
+ }
+ return s.phys.Put(physEntry)
+}
+
+func (s *InmemStorage) Delete(k string) error {
+ s.once.Do(s.init)
+ return s.phys.Delete(k)
+}
+
+func (s *InmemStorage) init() {
+ s.phys = physical.NewInmem(nil)
+}
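+
+// exampleInmemUsage is an illustrative sketch (not part of upstream Vault):
+// the zero value of InmemStorage is ready to use because init runs lazily
+// via sync.Once on first access.
+func exampleInmemUsage() (*StorageEntry, error) {
+	s := new(InmemStorage)
+	if err := s.Put(&StorageEntry{Key: "foo", Value: []byte("bar")}); err != nil {
+		return nil, err
+	}
+	return s.Get("foo")
+}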
diff --git a/vendor/github.com/hashicorp/vault/logical/storage_inmem_test.go b/vendor/github.com/hashicorp/vault/logical/storage_inmem_test.go
new file mode 100644
index 0000000..8e0964f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/storage_inmem_test.go
@@ -0,0 +1,9 @@
+package logical
+
+import (
+ "testing"
+)
+
+func TestInmemStorage(t *testing.T) {
+ TestStorage(t, new(InmemStorage))
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/system_view.go b/vendor/github.com/hashicorp/vault/logical/system_view.go
new file mode 100644
index 0000000..d769397
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/system_view.go
@@ -0,0 +1,74 @@
+package logical
+
+import (
+ "time"
+
+ "github.com/hashicorp/vault/helper/consts"
+)
+
+// SystemView exposes system configuration information in a safe way
+// for logical backends to consume
+type SystemView interface {
+ // DefaultLeaseTTL returns the default lease TTL set in Vault configuration
+ DefaultLeaseTTL() time.Duration
+
+ // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend
+ // authors should take care not to issue credentials that last longer than
+ // this value, as Vault will revoke them
+ MaxLeaseTTL() time.Duration
+
+ // SudoPrivilege returns true if given path has sudo privileges
+ // for the given client token
+ SudoPrivilege(path string, token string) bool
+
+ // Returns true if the mount is tainted. A mount is tainted if it is in the
+ // process of being unmounted. This should only be used in special
+ // circumstances; a primary use-case is as a guard in revocation functions.
+ // If revocation of a backend's leases fails it can keep the unmounting
+ // process from being successful. If the reason for this failure is not
+ // relevant when the mount is tainted (for instance, saving a CRL to disk
+ // when the stored CRL will be removed during the unmounting process
+	// anyway), we can ignore the errors to allow unmounting to complete.
+ Tainted() bool
+
+ // Returns true if caching is disabled. If true, no caches should be used,
+ // despite known slowdowns.
+ CachingDisabled() bool
+
+ // ReplicationState indicates the state of cluster replication
+ ReplicationState() consts.ReplicationState
+}
+
+type StaticSystemView struct {
+ DefaultLeaseTTLVal time.Duration
+ MaxLeaseTTLVal time.Duration
+ SudoPrivilegeVal bool
+ TaintedVal bool
+ CachingDisabledVal bool
+ Primary bool
+ ReplicationStateVal consts.ReplicationState
+}
+
+func (d StaticSystemView) DefaultLeaseTTL() time.Duration {
+ return d.DefaultLeaseTTLVal
+}
+
+func (d StaticSystemView) MaxLeaseTTL() time.Duration {
+ return d.MaxLeaseTTLVal
+}
+
+func (d StaticSystemView) SudoPrivilege(path string, token string) bool {
+ return d.SudoPrivilegeVal
+}
+
+func (d StaticSystemView) Tainted() bool {
+ return d.TaintedVal
+}
+
+func (d StaticSystemView) CachingDisabled() bool {
+ return d.CachingDisabledVal
+}
+
+func (d StaticSystemView) ReplicationState() consts.ReplicationState {
+ return d.ReplicationStateVal
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/testing.go b/vendor/github.com/hashicorp/vault/logical/testing.go
new file mode 100644
index 0000000..5bb60bb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/testing.go
@@ -0,0 +1,84 @@
+package logical
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+)
+
+// TestRequest is a helper to create a purely in-memory Request struct.
+func TestRequest(t *testing.T, op Operation, path string) *Request {
+ return &Request{
+ Operation: op,
+ Path: path,
+ Data: make(map[string]interface{}),
+ Storage: new(InmemStorage),
+ }
+}
+
+// TestStorage is a helper that can be used from unit tests to verify
+// the behavior of a Storage impl.
+func TestStorage(t *testing.T, s Storage) {
+ keys, err := s.List("")
+ if err != nil {
+ t.Fatalf("list error: %s", err)
+ }
+ if len(keys) > 0 {
+ t.Fatalf("should have no keys to start: %#v", keys)
+ }
+
+ entry := &StorageEntry{Key: "foo", Value: []byte("bar")}
+ if err := s.Put(entry); err != nil {
+ t.Fatalf("put error: %s", err)
+ }
+
+ actual, err := s.Get("foo")
+ if err != nil {
+ t.Fatalf("get error: %s", err)
+ }
+ if !reflect.DeepEqual(actual, entry) {
+ t.Fatalf("wrong value. Expected: %#v\nGot: %#v", entry, actual)
+ }
+
+ keys, err = s.List("")
+ if err != nil {
+ t.Fatalf("list error: %s", err)
+ }
+ if !reflect.DeepEqual(keys, []string{"foo"}) {
+ t.Fatalf("bad keys: %#v", keys)
+ }
+
+ if err := s.Delete("foo"); err != nil {
+		t.Fatalf("delete error: %s", err)
+ }
+
+ keys, err = s.List("")
+ if err != nil {
+ t.Fatalf("list error: %s", err)
+ }
+ if len(keys) > 0 {
+		t.Fatalf("should have no keys after delete: %#v", keys)
+ }
+}
+
+func TestSystemView() *StaticSystemView {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 2
+ return &StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ }
+}
+
+func TestBackendConfig() *BackendConfig {
+ bc := &BackendConfig{
+ Logger: logformat.NewVaultLogger(log.LevelTrace),
+ System: TestSystemView(),
+ }
+ bc.Logger.SetLevel(log.LevelTrace)
+
+ return bc
+}
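+
+// Hedged usage sketch (not part of upstream Vault): a backend's unit test
+// would typically wire these helpers together; Factory here stands in for
+// the backend's own constructor.
+//
+//	func TestMyBackend_config(t *testing.T) {
+//		b, err := Factory(TestBackendConfig())
+//		if err != nil {
+//			t.Fatal(err)
+//		}
+//		resp, err := b.HandleRequest(TestRequest(t, ReadOperation, "config"))
+//		...
+//	}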
diff --git a/vendor/github.com/hashicorp/vault/logical/testing/testing.go b/vendor/github.com/hashicorp/vault/logical/testing/testing.go
new file mode 100644
index 0000000..b2072ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/testing/testing.go
@@ -0,0 +1,406 @@
+package testing
+
+import (
+ "crypto/tls"
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "testing"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/vault"
+)
+
+// TestEnvVar must be set to a non-empty value for acceptance tests to run.
+const TestEnvVar = "VAULT_ACC"
+
+// TestCase is a single set of tests to run for a backend. A TestCase
+// should generally map 1:1 to each test method for your acceptance
+// tests.
+type TestCase struct {
+	// PreCheck, if non-nil, will be called once before the test case
+ // runs at all. This can be used for some validation prior to the
+ // test running.
+ PreCheck func()
+
+ // Backend is the backend that will be mounted.
+ Backend logical.Backend
+
+ // Factory can be used instead of Backend if the
+ // backend requires more construction
+ Factory logical.Factory
+
+ // Steps are the set of operations that are run for this test case.
+ Steps []TestStep
+
+	// Teardown will be called before the test case is over regardless
+	// of whether the test succeeded or failed. This should return an error
+ // in the case that the test can't guarantee all resources were
+ // properly cleaned up.
+ Teardown TestTeardownFunc
+
+	// AcceptanceTest, if set, marks this as an acceptance test that is run
+	// only when the environment variable VAULT_ACC is set. Otherwise the
+	// test case is run as a unit test.
+ AcceptanceTest bool
+}
+
+// TestStep is a single step within a TestCase.
+type TestStep struct {
+ // Operation is the operation to execute
+ Operation logical.Operation
+
+ // Path is the request path. The mount prefix will be automatically added.
+ Path string
+
+ // Arguments to pass in
+ Data map[string]interface{}
+
+ // Check is called after this step is executed in order to test that
+ // the step executed successfully. If this is not set, then the next
+ // step will be called
+ Check TestCheckFunc
+
+ // PreFlight is called directly before execution of the request, allowing
+ // modification of the request parameters (e.g. Path) with dynamic values.
+ PreFlight PreFlightFunc
+
+ // ErrorOk, if true, will let erroneous responses through to the check
+ ErrorOk bool
+
+ // Unauthenticated, if true, will make the request unauthenticated.
+ Unauthenticated bool
+
+ // RemoteAddr, if set, will set the remote addr on the request.
+ RemoteAddr string
+
+	// ConnState, if set, will set the TLS connection state
+ ConnState *tls.ConnectionState
+}
+
+// TestCheckFunc is the callback used for Check in TestStep.
+type TestCheckFunc func(*logical.Response) error
+
+// PreFlightFunc is used to modify request parameters directly before execution
+// in each TestStep.
+type PreFlightFunc func(*logical.Request) error
+
+// TestTeardownFunc is the callback used for Teardown in TestCase.
+type TestTeardownFunc func() error
+
+// Test performs an acceptance test on a backend with the given test case.
+//
+// Tests are not run unless the environment variable "VAULT_ACC" is
+// set to some non-empty value. This is to avoid test cases surprising
+// a user by creating real resources.
+//
+// Tests will fail unless the verbose flag (`go test -v`, or explicitly
+// the "-test.v" flag) is set. Because some acceptance tests take quite
+// a long time, we require the verbose flag so users are able to see progress
+// output.
+func Test(tt TestT, c TestCase) {
+ // We only run acceptance tests if an env var is set because they're
+ // slow and generally require some outside configuration.
+ if c.AcceptanceTest && os.Getenv(TestEnvVar) == "" {
+ tt.Skip(fmt.Sprintf(
+ "Acceptance tests skipped unless env '%s' set",
+ TestEnvVar))
+ return
+ }
+
+ // We require verbose mode so that the user knows what is going on.
+ if c.AcceptanceTest && !testTesting && !testing.Verbose() {
+ tt.Fatal("Acceptance tests must be run with the -v flag on tests")
+ return
+ }
+
+ // Run the PreCheck if we have it
+ if c.PreCheck != nil {
+ c.PreCheck()
+ }
+
+ // Check that something is provided
+ if c.Backend == nil && c.Factory == nil {
+ tt.Fatal("Must provide either Backend or Factory")
+ return
+ }
+
+ // Create an in-memory Vault core
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ core, err := vault.NewCore(&vault.CoreConfig{
+ Physical: physical.NewInmem(logger),
+ LogicalBackends: map[string]logical.Factory{
+ "test": func(conf *logical.BackendConfig) (logical.Backend, error) {
+ if c.Backend != nil {
+ return c.Backend, nil
+ }
+ return c.Factory(conf)
+ },
+ },
+ DisableMlock: true,
+ })
+ if err != nil {
+ tt.Fatal("error initializing core: ", err)
+ return
+ }
+
+ // Initialize the core
+ init, err := core.Initialize(&vault.InitParams{
+ BarrierConfig: &vault.SealConfig{
+ SecretShares: 1,
+ SecretThreshold: 1,
+ },
+ RecoveryConfig: nil,
+ })
+ if err != nil {
+ tt.Fatal("error initializing core: ", err)
+ return
+ }
+
+ // Unseal the core
+ if unsealed, err := core.Unseal(init.SecretShares[0]); err != nil {
+ tt.Fatal("error unsealing core: ", err)
+ return
+ } else if !unsealed {
+ tt.Fatal("vault shouldn't be sealed")
+ return
+ }
+
+ // Create an HTTP API server and client
+ ln, addr := http.TestServer(nil, core)
+ defer ln.Close()
+ clientConfig := api.DefaultConfig()
+ clientConfig.Address = addr
+ client, err := api.NewClient(clientConfig)
+ if err != nil {
+ tt.Fatal("error initializing HTTP client: ", err)
+ return
+ }
+
+ // Set the token so we're authenticated
+ client.SetToken(init.RootToken)
+
+ // Mount the backend
+ prefix := "mnt"
+ mountInfo := &api.MountInput{
+ Type: "test",
+ Description: "acceptance test",
+ }
+ if err := client.Sys().Mount(prefix, mountInfo); err != nil {
+ tt.Fatal("error mounting backend: ", err)
+ return
+ }
+
+ // Make requests
+ var revoke []*logical.Request
+ for i, s := range c.Steps {
+ if log.IsWarn() {
+ log.Warn("Executing test step", "step_number", i+1)
+ }
+
+ // Create the request
+ req := &logical.Request{
+ Operation: s.Operation,
+ Path: s.Path,
+ Data: s.Data,
+ }
+ if !s.Unauthenticated {
+ req.ClientToken = client.Token()
+ }
+ if s.RemoteAddr != "" {
+ req.Connection = &logical.Connection{RemoteAddr: s.RemoteAddr}
+ }
+ if s.ConnState != nil {
+ req.Connection = &logical.Connection{ConnState: s.ConnState}
+ }
+
+ if s.PreFlight != nil {
+ ct := req.ClientToken
+ req.ClientToken = ""
+ if err := s.PreFlight(req); err != nil {
+ tt.Error(fmt.Sprintf("Failed preflight for step %d: %s", i+1, err))
+ break
+ }
+ req.ClientToken = ct
+ }
+
+ // Make sure to prefix the path with where we mounted the thing
+ req.Path = fmt.Sprintf("%s/%s", prefix, req.Path)
+
+ // Make the request
+ resp, err := core.HandleRequest(req)
+ if resp != nil && resp.Secret != nil {
+ // Revoke this secret later
+ revoke = append(revoke, &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/revoke/" + resp.Secret.LeaseID,
+ })
+ }
+
+ // Test step returned an error.
+ if err != nil {
+ // But if an error is expected, do not fail the test step,
+ // regardless of whether the error is a 'logical.ErrorResponse'
+ // or not. Set the err to nil. If the error is a logical.ErrorResponse,
+ // it will be handled later.
+ if s.ErrorOk {
+ err = nil
+ } else {
+ // If the error is not expected, fail right away.
+ tt.Error(fmt.Sprintf("Failed step %d: %s", i+1, err))
+ break
+ }
+ }
+
+		// If the response is a 'logical.ErrorResponse' and an error was not expected,
+ // set the error so that this can be caught below.
+ if resp.IsError() && !s.ErrorOk {
+ err = fmt.Errorf("Erroneous response:\n\n%#v", resp)
+ }
+
+ // Either the 'err' was nil or if an error was expected, it was set to nil.
+ // Call the 'Check' function if there is one.
+ //
+		// TODO: This works perfectly for now, but it would be better if the
+		// 'Check' function took in both the response object and the error, and
+		// decided on the action on its own.
+ if err == nil && s.Check != nil {
+ // Call the test method
+ err = s.Check(resp)
+ }
+
+ if err != nil {
+ tt.Error(fmt.Sprintf("Failed step %d: %s", i+1, err))
+ break
+ }
+ }
+
+ // Revoke any secrets we might have.
+ var failedRevokes []*logical.Secret
+ for _, req := range revoke {
+ if log.IsWarn() {
+ log.Warn("Revoking secret", "secret", fmt.Sprintf("%#v", req))
+ }
+ req.ClientToken = client.Token()
+ resp, err := core.HandleRequest(req)
+ if err == nil && resp.IsError() {
+ err = fmt.Errorf("Erroneous response:\n\n%#v", resp)
+ }
+ if err != nil {
+ failedRevokes = append(failedRevokes, req.Secret)
+ tt.Error(fmt.Sprintf("Revoke error: %s", err))
+ }
+ }
+
+ // Perform any rollbacks. This should no-op if there aren't any.
+ // We set the "immediate" flag here that any backend can pick up on
+ // to do all rollbacks immediately even if the WAL entries are new.
+ log.Warn("Requesting RollbackOperation")
+ req := logical.RollbackRequest(prefix + "/")
+ req.Data["immediate"] = true
+ req.ClientToken = client.Token()
+ resp, err := core.HandleRequest(req)
+ if err == nil && resp.IsError() {
+ err = fmt.Errorf("Erroneous response:\n\n%#v", resp)
+ }
+ if err != nil {
+ if !errwrap.Contains(err, logical.ErrUnsupportedOperation.Error()) {
+ tt.Error(fmt.Sprintf("[ERR] Rollback error: %s", err))
+ }
+ }
+
+ // If we have any failed revokes, log it.
+ if len(failedRevokes) > 0 {
+ for _, s := range failedRevokes {
+ tt.Error(fmt.Sprintf(
+ "WARNING: Revoking the following secret failed. It may\n"+
+ "still exist. Please verify:\n\n%#v",
+ s))
+ }
+ }
+
+ // Cleanup
+ if c.Teardown != nil {
+		if err := c.Teardown(); err != nil {
+			tt.Error(fmt.Sprintf("Teardown error: %s", err))
+		}
+ }
+}
+
+// TestCheckMulti is a helper to have multiple checks.
+func TestCheckMulti(fs ...TestCheckFunc) TestCheckFunc {
+ return func(resp *logical.Response) error {
+ for _, f := range fs {
+ if err := f(resp); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+}
+
+// TestCheckAuth is a helper to check that a request generated an
+// auth token with the proper policies.
+func TestCheckAuth(policies []string) TestCheckFunc {
+ return func(resp *logical.Response) error {
+ if resp == nil || resp.Auth == nil {
+ return fmt.Errorf("no auth in response")
+ }
+ expected := make([]string, len(policies))
+ copy(expected, policies)
+ sort.Strings(expected)
+ ret := make([]string, len(resp.Auth.Policies))
+ copy(ret, resp.Auth.Policies)
+ sort.Strings(ret)
+ if !reflect.DeepEqual(ret, expected) {
+ return fmt.Errorf("invalid policies: expected %#v, got %#v", expected, ret)
+ }
+
+ return nil
+ }
+}
+
+// TestCheckAuthDisplayName is a helper to check that a request generated a
+// valid display name.
+func TestCheckAuthDisplayName(n string) TestCheckFunc {
+ return func(resp *logical.Response) error {
+ if resp.Auth == nil {
+ return fmt.Errorf("no auth in response")
+ }
+ if n != "" && resp.Auth.DisplayName != "mnt-"+n {
+ return fmt.Errorf("invalid display name: %#v", resp.Auth.DisplayName)
+ }
+
+ return nil
+ }
+}
+
+// TestCheckError is a helper to check that a response is an error.
+func TestCheckError() TestCheckFunc {
+ return func(resp *logical.Response) error {
+ if !resp.IsError() {
+ return fmt.Errorf("response should be error")
+ }
+
+ return nil
+ }
+}
+
+// TestT is the interface used to handle the test lifecycle of a test.
+//
+// Users should just use a *testing.T object, which implements this.
+type TestT interface {
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Skip(args ...interface{})
+}
+
+var testTesting = false
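+
+// Hedged usage sketch (not part of upstream Vault): an acceptance test for a
+// backend usually builds a TestCase from a handful of TestSteps, e.g.
+//
+//	func TestBackend_basic(t *testing.T) {
+//		Test(t, TestCase{
+//			AcceptanceTest: true,
+//			Factory:        Factory, // the backend's logical.Factory
+//			Steps: []TestStep{
+//				{
+//					Operation: logical.UpdateOperation,
+//					Path:      "config",
+//					Data:      map[string]interface{}{"ttl": "1h"},
+//				},
+//				{
+//					Operation: logical.ReadOperation,
+//					Path:      "config",
+//					Check: func(resp *logical.Response) error {
+//						if resp == nil {
+//							return fmt.Errorf("expected a response")
+//						}
+//						return nil
+//					},
+//				},
+//			},
+//		})
+//	}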
diff --git a/vendor/github.com/hashicorp/vault/logical/testing/testing_test.go b/vendor/github.com/hashicorp/vault/logical/testing/testing_test.go
new file mode 100644
index 0000000..5a4096b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/testing/testing_test.go
@@ -0,0 +1,90 @@
+package testing
+
+import (
+ "os"
+ "testing"
+)
+
+func init() {
+ testTesting = true
+
+ if err := os.Setenv(TestEnvVar, "1"); err != nil {
+ panic(err)
+ }
+}
+
+func TestTest_noEnv(t *testing.T) {
+	// Clear the variable; Test treats an empty value as unset
+ if err := os.Setenv(TestEnvVar, ""); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer os.Setenv(TestEnvVar, "1")
+
+ mt := new(mockT)
+ Test(mt, TestCase{
+ AcceptanceTest: true,
+ })
+
+ if !mt.SkipCalled {
+ t.Fatal("skip not called")
+ }
+}
+
+func TestTest_preCheck(t *testing.T) {
+ called := false
+
+ mt := new(mockT)
+ Test(mt, TestCase{
+ PreCheck: func() { called = true },
+ })
+
+ if !called {
+ t.Fatal("precheck should be called")
+ }
+}
+
+// mockT implements TestT for testing
+type mockT struct {
+ ErrorCalled bool
+ ErrorArgs []interface{}
+ FatalCalled bool
+ FatalArgs []interface{}
+ SkipCalled bool
+ SkipArgs []interface{}
+
+ f bool
+}
+
+func (t *mockT) Error(args ...interface{}) {
+ t.ErrorCalled = true
+ t.ErrorArgs = args
+ t.f = true
+}
+
+func (t *mockT) Fatal(args ...interface{}) {
+ t.FatalCalled = true
+ t.FatalArgs = args
+ t.f = true
+}
+
+func (t *mockT) Skip(args ...interface{}) {
+ t.SkipCalled = true
+ t.SkipArgs = args
+ t.f = true
+}
+
+func (t *mockT) failed() bool {
+ return t.f
+}
+
+func (t *mockT) failMessage() string {
+ if t.FatalCalled {
+ return t.FatalArgs[0].(string)
+ } else if t.ErrorCalled {
+ return t.ErrorArgs[0].(string)
+ } else if t.SkipCalled {
+ return t.SkipArgs[0].(string)
+ }
+
+ return "unknown"
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/translate_response.go b/vendor/github.com/hashicorp/vault/logical/translate_response.go
new file mode 100644
index 0000000..048adaf
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/translate_response.go
@@ -0,0 +1,138 @@
+package logical
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+// This logic was pulled from the http package so that it can be used for
+// encoding wrapped responses as well. It simply translates a logical response
+// to an HTTP response, with the values we want and omitting the values we
+// don't.
+func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse {
+ httpResp := &HTTPResponse{
+ Data: input.Data,
+ Warnings: input.Warnings(),
+ }
+
+ if input.Secret != nil {
+ httpResp.LeaseID = input.Secret.LeaseID
+ httpResp.Renewable = input.Secret.Renewable
+ httpResp.LeaseDuration = int(input.Secret.TTL.Seconds())
+ }
+
+ // If we have authentication information, then
+ // set up the result structure.
+ if input.Auth != nil {
+ httpResp.Auth = &HTTPAuth{
+ ClientToken: input.Auth.ClientToken,
+ Accessor: input.Auth.Accessor,
+ Policies: input.Auth.Policies,
+ Metadata: input.Auth.Metadata,
+ LeaseDuration: int(input.Auth.TTL.Seconds()),
+ Renewable: input.Auth.Renewable,
+ }
+ }
+
+ return httpResp
+}
+
+func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response {
+ logicalResp := &Response{
+ Data: input.Data,
+ warnings: input.Warnings,
+ }
+
+ if input.LeaseID != "" {
+ logicalResp.Secret = &Secret{
+ LeaseID: input.LeaseID,
+ }
+ logicalResp.Secret.Renewable = input.Renewable
+ logicalResp.Secret.TTL = time.Second * time.Duration(input.LeaseDuration)
+ }
+
+ if input.Auth != nil {
+ logicalResp.Auth = &Auth{
+ ClientToken: input.Auth.ClientToken,
+ Accessor: input.Auth.Accessor,
+ Policies: input.Auth.Policies,
+ Metadata: input.Auth.Metadata,
+ }
+ logicalResp.Auth.Renewable = input.Auth.Renewable
+ logicalResp.Auth.TTL = time.Second * time.Duration(input.Auth.LeaseDuration)
+ }
+
+ return logicalResp
+}
+
+type HTTPResponse struct {
+ RequestID string `json:"request_id"`
+ LeaseID string `json:"lease_id"`
+ Renewable bool `json:"renewable"`
+ LeaseDuration int `json:"lease_duration"`
+ Data map[string]interface{} `json:"data"`
+ WrapInfo *HTTPWrapInfo `json:"wrap_info"`
+ Warnings []string `json:"warnings"`
+ Auth *HTTPAuth `json:"auth"`
+}
+
+type HTTPAuth struct {
+ ClientToken string `json:"client_token"`
+ Accessor string `json:"accessor"`
+ Policies []string `json:"policies"`
+ Metadata map[string]string `json:"metadata"`
+ LeaseDuration int `json:"lease_duration"`
+ Renewable bool `json:"renewable"`
+}
+
+type HTTPWrapInfo struct {
+ Token string `json:"token"`
+ TTL int `json:"ttl"`
+ CreationTime string `json:"creation_time"`
+ WrappedAccessor string `json:"wrapped_accessor,omitempty"`
+}
+
+type HTTPSysInjector struct {
+ Response *HTTPResponse
+}
+
+func (h HTTPSysInjector) MarshalJSON() ([]byte, error) {
+ j, err := json.Marshal(h.Response)
+ if err != nil {
+ return nil, err
+ }
+
+	// Fast path: no data or empty data (len of a nil map is 0)
+	if len(h.Response.Data) == 0 {
+ return j, nil
+ }
+
+ // Marshaling a response will always be a JSON object, meaning it will
+ // always start with '{', so we hijack this to prepend necessary values
+
+ // Make a guess at the capacity, and write the object opener
+ buf := bytes.NewBuffer(make([]byte, 0, len(j)*2))
+ buf.WriteRune('{')
+
+ for k, v := range h.Response.Data {
+ // Marshal each key/value individually
+ mk, err := json.Marshal(k)
+ if err != nil {
+ return nil, err
+ }
+ mv, err := json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ // Write into the final buffer. We'll never have a valid response
+ // without any fields so we can unconditionally add a comma after each.
+ buf.WriteString(fmt.Sprintf("%s: %s, ", mk, mv))
+ }
+
+ // Add the rest, without the first '{'
+ buf.Write(j[1:])
+
+ return buf.Bytes(), nil
+}
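+
+// roundTripResponse is an illustrative sketch (not part of upstream Vault):
+// the two translation helpers above are near-inverses. Lease and auth TTLs
+// survive the trip rounded down to whole seconds, while request-scoped
+// fields are dropped by design.
+func roundTripResponse(in *Response) *Response {
+	return HTTPResponseToLogicalResponse(LogicalResponseToHTTPResponse(in))
+}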
diff --git a/vendor/github.com/hashicorp/vault/main.go b/vendor/github.com/hashicorp/vault/main.go
new file mode 100644
index 0000000..6cd34fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/main.go
@@ -0,0 +1,11 @@
+package main // import "github.com/hashicorp/vault"
+
+import (
+ "os"
+
+ "github.com/hashicorp/vault/cli"
+)
+
+func main() {
+ os.Exit(cli.Run(os.Args[1:]))
+}
diff --git a/vendor/github.com/hashicorp/vault/main_test.go b/vendor/github.com/hashicorp/vault/main_test.go
new file mode 100644
index 0000000..4c4c79a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/main_test.go
@@ -0,0 +1,4 @@
+package main // import "github.com/hashicorp/vault"
+
+// This file is intentionally empty; its presence forces `go test` on early
+// versions of Go to compile the main package.
diff --git a/vendor/github.com/hashicorp/vault/make.bat b/vendor/github.com/hashicorp/vault/make.bat
new file mode 100644
index 0000000..34adbfd
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/make.bat
@@ -0,0 +1,107 @@
+@echo off
+setlocal
+
+set _EXITCODE=0
+
+REM If no target is provided, default to test.
+if [%1]==[] goto test
+
+set _TARGETS=bin,dev,generate,test,testacc,testrace,vet
+
+REM Run target.
+for %%a in (%_TARGETS%) do (if x%1==x%%a goto %%a)
+goto usage
+
+REM bin generates the releasable binaries for Vault
+:bin
+ call :generate
+ call .\scripts\windows\build.bat "%CD%"
+ goto :eof
+
+REM dev creates binaries for testing Vault locally. These are put
+REM into ./bin/ as well as %GOPATH%/bin
+:dev
+ call :generate
+ call .\scripts\windows\build.bat "%CD%" VAULT_DEV
+ goto :eof
+
+REM generate runs `go generate` to build the dynamically generated
+REM source files.
+:generate
+	REM go generate takes package arguments, not stdin, so iterate the list
+	for /f "delims=" %%p in ('go list ./... ^| findstr /v vendor') do go generate %%p
+ goto :eof
+
+REM test runs the unit tests and vets the code.
+:test
+ call :testsetup
+ go test %_TEST% %TESTARGS% -timeout=30s -parallel=4
+ call :setMaxExitCode %ERRORLEVEL%
+ echo.
+ goto vet
+
+REM testacc runs acceptance tests.
+:testacc
+ call :testsetup
+ if x%_TEST% == x./... goto testacc_fail
+ if x%_TEST% == x.\... goto testacc_fail
+ set VAULT_ACC=1
+ go test %_TEST% -v %TESTARGS% -timeout 45m
+ exit /b %ERRORLEVEL%
+:testacc_fail
+ echo ERROR: Set %%TEST%% to a specific package.
+ exit /b 1
+
+REM testrace runs the race checker.
+:testrace
+ call :testsetup
+ go test -race %_TEST% %TESTARGS%
+ exit /b %ERRORLEVEL%
+
+REM testsetup calls `go generate` and defines the variables VAULT_ACC
+REM and _TEST. VAULT_ACC is always cleared. _TEST defaults to the value
+REM of the TEST environment variable, provided that TEST is defined,
+REM otherwise _TEST is set to "./...".
+:testsetup
+ call :generate
+ set VAULT_ACC=
+ set _TEST=./...
+ if defined TEST set _TEST=%TEST%
+ goto :eof
+
+REM vet runs the Go source code static analysis tool `vet` to find
+REM any common errors.
+:vet
+ set _VETARGS=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
+ if defined VETARGS set _VETARGS=%VETARGS%
+
+ go tool vet 2>nul
+ if %ERRORLEVEL% equ 3 go get golang.org/x/tools/cmd/vet
+
+ set _vetExitCode=0
+ set _VAULT_PKG_DIRS=%TEMP%\vault-pkg-dirs.txt
+
+ go list -f {{.Dir}} ./... | findstr /v vendor >"%_VAULT_PKG_DIRS%"
+ REM Skip the first row, which is the main vault package (.*github.com/hashicorp/vault$)
+	REM usebackq makes the quoted token a filename; delims= must come last,
+	REM since everything after it up to the closing quote is the delimiter set.
+	for /f "usebackq skip=1 delims=" %%d in ("%_VAULT_PKG_DIRS%") do (
+		go tool vet %_VETARGS% "%%d"
+		REM Percent-expansion happens once when this block is parsed, so pass
+		REM the failure to :setMaxExitCode directly instead of reading the
+		REM variable back inside the loop.
+		if ERRORLEVEL 1 (
+			set _vetExitCode=1
+			call :setMaxExitCode 1
+		)
+ )
+ del /f "%_VAULT_PKG_DIRS%" 2>NUL
+ if %_vetExitCode% equ 0 exit /b %_EXITCODE%
+ echo.
+ echo Vet found suspicious constructs. Please check the reported constructs
+echo and fix them if necessary before submitting the code for review.
+ exit /b %_EXITCODE%
+
+:setMaxExitCode
+ if %1 gtr %_EXITCODE% set _EXITCODE=%1
+ goto :eof
+
+:usage
+ echo usage: make [target]
+ echo.
+ echo target is in {%_TARGETS%}.
+ echo target defaults to test if none is provided.
+ exit /b 2
+ goto :eof
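+
+REM Hedged usage examples (not part of the upstream script):
+REM   make.bat                runs generate, the unit tests, and vet
+REM   make.bat testrace       runs the unit tests under the race detector
+REM   set TEST=./vault
+REM   make.bat testacc        runs acceptance tests for the chosen package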
diff --git a/vendor/github.com/hashicorp/vault/meta/meta.go b/vendor/github.com/hashicorp/vault/meta/meta.go
new file mode 100644
index 0000000..0f5fef9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/meta/meta.go
@@ -0,0 +1,208 @@
+package meta
+
+import (
+ "bufio"
+ "flag"
+ "io"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/command/token"
+ "github.com/mitchellh/cli"
+)
+
+// FlagSetFlags is an enum to define what flags are present in the
+// default FlagSet returned by Meta.FlagSet.
+type FlagSetFlags uint
+
+type TokenHelperFunc func() (token.TokenHelper, error)
+
+const (
+ FlagSetNone FlagSetFlags = 0
+ FlagSetServer FlagSetFlags = 1 << iota
+ FlagSetDefault = FlagSetServer
+)
+
+var (
+ additionalOptionsUsage = func() string {
+ return `
+ -wrap-ttl="" Indicates that the response should be wrapped in a
+ cubbyhole token with the requested TTL. The response
+ can be fetched by calling the "sys/wrapping/unwrap"
+                   endpoint, passing in the wrapping token's ID. This
+ is a numeric string with an optional suffix
+ "s", "m", or "h"; if no suffix is specified it will
+ be parsed as seconds. May also be specified via
+ VAULT_WRAP_TTL.
+`
+ }
+)
+
+// Meta contains the meta-options and functionality that nearly every
+// Vault command inherits.
+type Meta struct {
+ ClientToken string
+ Ui cli.Ui
+
+ // The things below can be set, but aren't common
+ ForceAddress string // Address to force for API clients
+
+ // These are set by the command line flags.
+ flagAddress string
+ flagCACert string
+ flagCAPath string
+ flagClientCert string
+ flagClientKey string
+ flagWrapTTL string
+ flagInsecure bool
+
+ // Queried if no token can be found
+ TokenHelper TokenHelperFunc
+}
+
+func (m *Meta) DefaultWrappingLookupFunc(operation, path string) string {
+ if m.flagWrapTTL != "" {
+ return m.flagWrapTTL
+ }
+
+ return api.DefaultWrappingLookupFunc(operation, path)
+}
+
+// Client returns the API client to a Vault server given the configured
+// flag settings for this command.
+func (m *Meta) Client() (*api.Client, error) {
+ config := api.DefaultConfig()
+
+ err := config.ReadEnvironment()
+ if err != nil {
+ return nil, errwrap.Wrapf("error reading environment: {{err}}", err)
+ }
+
+ if m.flagAddress != "" {
+ config.Address = m.flagAddress
+ }
+ if m.ForceAddress != "" {
+ config.Address = m.ForceAddress
+ }
+ // If we need custom TLS configuration, then set it
+ if m.flagCACert != "" || m.flagCAPath != "" || m.flagClientCert != "" || m.flagClientKey != "" || m.flagInsecure {
+ t := &api.TLSConfig{
+ CACert: m.flagCACert,
+ CAPath: m.flagCAPath,
+ ClientCert: m.flagClientCert,
+ ClientKey: m.flagClientKey,
+ TLSServerName: "",
+ Insecure: m.flagInsecure,
+ }
+ config.ConfigureTLS(t)
+ }
+
+ // Build the client
+ client, err := api.NewClient(config)
+ if err != nil {
+ return nil, err
+ }
+
+ client.SetWrappingLookupFunc(m.DefaultWrappingLookupFunc)
+
+ // If we have a token directly, then set that
+ token := m.ClientToken
+
+ // Try to set the token to what is already stored
+ if token == "" {
+ token = client.Token()
+ }
+
+ // If we don't have a token, check the token helper
+ if token == "" {
+ if m.TokenHelper != nil {
+ // If we have a token, then set that
+ tokenHelper, err := m.TokenHelper()
+ if err != nil {
+ return nil, err
+ }
+ token, err = tokenHelper.Get()
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ // Set the token
+ if token != "" {
+ client.SetToken(token)
+ }
+
+ return client, nil
+}
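+
+// Hedged usage sketch (not part of upstream Vault): a command embedding Meta
+// typically parses its flags before asking for a configured client; ui and
+// helperFunc below are assumed to exist in the caller.
+//
+//	m := &Meta{Ui: ui, TokenHelper: helperFunc}
+//	f := m.FlagSet("status", FlagSetDefault)
+//	if err := f.Parse(args); err != nil {
+//		return 1
+//	}
+//	client, err := m.Client()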
+
+// FlagSet returns a FlagSet with the common flags that every
+// command implements. The exact behavior of FlagSet can be configured
+// using the flags as the second parameter, for example to disable
+// server settings on the commands that don't talk to a server.
+func (m *Meta) FlagSet(n string, fs FlagSetFlags) *flag.FlagSet {
+ f := flag.NewFlagSet(n, flag.ContinueOnError)
+
+ // FlagSetServer tells us to enable the settings for selecting
+ // the server information.
+ if fs&FlagSetServer != 0 {
+ f.StringVar(&m.flagAddress, "address", "", "")
+ f.StringVar(&m.flagCACert, "ca-cert", "", "")
+ f.StringVar(&m.flagCAPath, "ca-path", "", "")
+ f.StringVar(&m.flagClientCert, "client-cert", "", "")
+ f.StringVar(&m.flagClientKey, "client-key", "", "")
+ f.StringVar(&m.flagWrapTTL, "wrap-ttl", "", "")
+ f.BoolVar(&m.flagInsecure, "insecure", false, "")
+ f.BoolVar(&m.flagInsecure, "tls-skip-verify", false, "")
+ }
+
+ // Create an io.Writer that writes to our Ui properly for errors.
+ // This is kind of a hack, but it does the job. Basically: create
+ // a pipe, use a scanner to break it into lines, and output each line
+ // to the UI. Do this forever.
+ errR, errW := io.Pipe()
+ errScanner := bufio.NewScanner(errR)
+ go func() {
+ for errScanner.Scan() {
+ m.Ui.Error(errScanner.Text())
+ }
+ }()
+ f.SetOutput(errW)
+
+ return f
+}
+
+// GeneralOptionsUsage returns the usage documentation for commonly
+// available options
+func GeneralOptionsUsage() string {
+ general := `
+ -address=addr The address of the Vault server.
+ Overrides the VAULT_ADDR environment variable if set.
+
+ -ca-cert=path Path to a PEM encoded CA cert file to use to
+ verify the Vault server SSL certificate.
+ Overrides the VAULT_CACERT environment variable if set.
+
+ -ca-path=path Path to a directory of PEM encoded CA cert files
+ to verify the Vault server SSL certificate. If both
+ -ca-cert and -ca-path are specified, -ca-cert is used.
+ Overrides the VAULT_CAPATH environment variable if set.
+
+ -client-cert=path Path to a PEM encoded client certificate for TLS
+ authentication to the Vault server. Must also specify
+ -client-key. Overrides the VAULT_CLIENT_CERT
+ environment variable if set.
+
+ -client-key=path Path to an unencrypted PEM encoded private key
+ matching the client certificate from -client-cert.
+ Overrides the VAULT_CLIENT_KEY environment variable
+ if set.
+
+  -tls-skip-verify        Do not verify TLS certificates. This is strongly
+                          discouraged. Verification will also be skipped
+ if VAULT_SKIP_VERIFY is set.
+`
+
+ general += additionalOptionsUsage()
+ return general
+}
diff --git a/vendor/github.com/hashicorp/vault/meta/meta_test.go b/vendor/github.com/hashicorp/vault/meta/meta_test.go
new file mode 100644
index 0000000..1ef9c13
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/meta/meta_test.go
@@ -0,0 +1,41 @@
+package meta
+
+import (
+ "flag"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func TestFlagSet(t *testing.T) {
+ cases := []struct {
+ Flags FlagSetFlags
+ Expected []string
+ }{
+ {
+ FlagSetNone,
+ []string{},
+ },
+ {
+ FlagSetServer,
+ []string{"address", "ca-cert", "ca-path", "client-cert", "client-key", "insecure", "tls-skip-verify", "wrap-ttl"},
+ },
+ }
+
+ for i, tc := range cases {
+ var m Meta
+ fs := m.FlagSet("foo", tc.Flags)
+
+		actual := make([]string, 0)
+ fs.VisitAll(func(f *flag.Flag) {
+ actual = append(actual, f.Name)
+ })
+ sort.Strings(actual)
+ sort.Strings(tc.Expected)
+
+ if !reflect.DeepEqual(actual, tc.Expected) {
+ t.Fatalf("%d: flags: %#v\n\nExpected: %#v\nGot: %#v",
+ i, tc.Flags, tc.Expected, actual)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/azure.go b/vendor/github.com/hashicorp/vault/physical/azure.go
new file mode 100644
index 0000000..4d5083e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/azure.go
@@ -0,0 +1,190 @@
+package physical
+
+import (
+ "encoding/base64"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/Azure/azure-storage-go"
+ "github.com/armon/go-metrics"
+ "github.com/hashicorp/errwrap"
+)
+
+// MaxBlobSize is the maximum supported blob size at this time (4 MiB)
+var MaxBlobSize = 1024 * 1024 * 4
+
+// AzureBackend is a physical backend that stores data
+// within an Azure blob container.
+type AzureBackend struct {
+ container string
+ client storage.BlobStorageClient
+ logger log.Logger
+ permitPool *PermitPool
+}
+
+// newAzureBackend constructs an Azure backend using a pre-existing
+// container. Credentials can be provided in the configuration or
+// sourced from the AZURE_* environment variables.
+func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+
+ container := os.Getenv("AZURE_BLOB_CONTAINER")
+ if container == "" {
+ container = conf["container"]
+ if container == "" {
+ return nil, fmt.Errorf("'container' must be set")
+ }
+ }
+
+ accountName := os.Getenv("AZURE_ACCOUNT_NAME")
+ if accountName == "" {
+ accountName = conf["accountName"]
+ if accountName == "" {
+ return nil, fmt.Errorf("'accountName' must be set")
+ }
+ }
+
+ accountKey := os.Getenv("AZURE_ACCOUNT_KEY")
+ if accountKey == "" {
+ accountKey = conf["accountKey"]
+ if accountKey == "" {
+ return nil, fmt.Errorf("'accountKey' must be set")
+ }
+ }
+
+ client, err := storage.NewBasicClient(accountName, accountKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create Azure client: %v", err)
+ }
+
+ contObj := client.GetBlobService().GetContainerReference(container)
+ created, err := contObj.CreateIfNotExists()
+ if err != nil {
+ return nil, fmt.Errorf("failed to upsert container: %v", err)
+ }
+ if created {
+ err = contObj.SetPermissions(storage.ContainerPermissions{
+ AccessType: storage.ContainerAccessTypePrivate,
+ }, 0, "")
+ if err != nil {
+ return nil, fmt.Errorf("failed to set permissions on newly-created container: %v", err)
+ }
+ }
+
+ maxParStr, ok := conf["max_parallel"]
+ var maxParInt int
+ if ok {
+ maxParInt, err = strconv.Atoi(maxParStr)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ }
+ if logger.IsDebug() {
+ logger.Debug("azure: max_parallel set", "max_parallel", maxParInt)
+ }
+ }
+
+ a := &AzureBackend{
+ container: container,
+ client: client.GetBlobService(),
+ logger: logger,
+ permitPool: NewPermitPool(maxParInt),
+ }
+ return a, nil
+}
+
+// Put is used to insert or update an entry
+func (a *AzureBackend) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"azure", "put"}, time.Now())
+
+ if len(entry.Value) >= MaxBlobSize {
+		return fmt.Errorf("value is bigger than the current supported limit of 4 MiB")
+ }
+
+ blockID := base64.StdEncoding.EncodeToString([]byte("AAAA"))
+ blocks := make([]storage.Block, 1)
+ blocks[0] = storage.Block{ID: blockID, Status: storage.BlockStatusLatest}
+
+ a.permitPool.Acquire()
+ defer a.permitPool.Release()
+
+	err := a.client.PutBlock(a.container, entry.Key, blockID, entry.Value)
+	if err != nil {
+		return err
+	}
+
+	return a.client.PutBlockList(a.container, entry.Key, blocks)
+}
+
+// Get is used to fetch an entry
+func (a *AzureBackend) Get(key string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"azure", "get"}, time.Now())
+
+ a.permitPool.Acquire()
+ defer a.permitPool.Release()
+
+	exists, err := a.client.BlobExists(a.container, key)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, nil
+	}
+
+ reader, err := a.client.GetBlob(a.container, key)
+
+ if err != nil {
+ return nil, err
+ }
+
+ data, err := ioutil.ReadAll(reader)
+
+ ent := &Entry{
+ Key: key,
+ Value: data,
+ }
+
+ return ent, err
+}
+
+// Delete is used to permanently delete an entry
+func (a *AzureBackend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"azure", "delete"}, time.Now())
+
+ a.permitPool.Acquire()
+ defer a.permitPool.Release()
+
+ _, err := a.client.DeleteBlobIfExists(a.container, key, nil)
+ return err
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (a *AzureBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"azure", "list"}, time.Now())
+
+ a.permitPool.Acquire()
+ defer a.permitPool.Release()
+
+ contObj := a.client.GetContainerReference(a.container)
+ list, err := contObj.ListBlobs(storage.ListBlobsParameters{Prefix: prefix})
+
+ if err != nil {
+ // Break early.
+ return nil, err
+ }
+
+ keys := []string{}
+ for _, blob := range list.Blobs {
+ key := strings.TrimPrefix(blob.Name, prefix)
+ if i := strings.Index(key, "/"); i == -1 {
+ keys = append(keys, key)
+ } else {
+ keys = appendIfMissing(keys, key[:i+1])
+ }
+ }
+
+ sort.Strings(keys)
+ return keys, nil
+}
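+
+// Hedged configuration sketch (not part of upstream Vault docs): the
+// constructor reads the AZURE_* environment variables first and falls back
+// to the config map, so a minimal backend stanza could look like:
+//
+//	backend "azure" {
+//	  container   = "vault-data"
+//	  accountName = "myaccount"
+//	  accountKey  = "..."
+//	}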
diff --git a/vendor/github.com/hashicorp/vault/physical/azure_test.go b/vendor/github.com/hashicorp/vault/physical/azure_test.go
new file mode 100644
index 0000000..135e658
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/azure_test.go
@@ -0,0 +1,48 @@
+package physical
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/Azure/azure-storage-go"
+)
+
+func TestAzureBackend(t *testing.T) {
+ if os.Getenv("AZURE_ACCOUNT_NAME") == "" ||
+ os.Getenv("AZURE_ACCOUNT_KEY") == "" {
+ t.SkipNow()
+ }
+
+ accountName := os.Getenv("AZURE_ACCOUNT_NAME")
+ accountKey := os.Getenv("AZURE_ACCOUNT_KEY")
+
+ ts := time.Now().UnixNano()
+ container := fmt.Sprintf("vault-test-%d", ts)
+
+ cleanupClient, _ := storage.NewBasicClient(accountName, accountKey)
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ backend, err := NewBackend("azure", logger, map[string]string{
+ "container": container,
+ "accountName": accountName,
+ "accountKey": accountKey,
+ })
+
+ defer func() {
+ contObj := cleanupClient.GetBlobService().GetContainerReference(container)
+ contObj.DeleteIfExists()
+ }()
+
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ testBackend(t, backend)
+ testBackend_ListPrefix(t, backend)
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/cache.go b/vendor/github.com/hashicorp/vault/physical/cache.go
new file mode 100644
index 0000000..f1b1365
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/cache.go
@@ -0,0 +1,156 @@
+package physical
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/golang-lru"
+ "github.com/hashicorp/vault/helper/locksutil"
+ log "github.com/mgutz/logxi/v1"
+)
+
+const (
+ // DefaultCacheSize is used if no cache size is specified for NewCache
+ DefaultCacheSize = 32 * 1024
+)
+
+// Cache is used to wrap an underlying physical backend
+// and provide an LRU cache layer on top. Most of the reads done by
+// Vault are for policy objects so there is a large read reduction
+// by using a simple write-through cache.
+type Cache struct {
+ backend Backend
+ transactional Transactional
+ lru *lru.TwoQueueCache
+ locks []*locksutil.LockEntry
+ logger log.Logger
+}
+
+// NewCache returns a physical cache of the given size.
+// If no size is provided, the default size is used.
+func NewCache(b Backend, size int, logger log.Logger) *Cache {
+ if size <= 0 {
+ size = DefaultCacheSize
+ }
+ if logger.IsTrace() {
+ logger.Trace("physical/cache: creating LRU cache", "size", size)
+ }
+ cache, _ := lru.New2Q(size)
+ c := &Cache{
+ backend: b,
+ lru: cache,
+ locks: locksutil.CreateLocks(),
+ logger: logger,
+ }
+
+ if txnl, ok := c.backend.(Transactional); ok {
+ c.transactional = txnl
+ }
+
+ return c
+}
+
+// Purge is used to clear the cache
+func (c *Cache) Purge() {
+ // Lock the world
+ for _, lock := range c.locks {
+ lock.Lock()
+ defer lock.Unlock()
+ }
+
+ c.lru.Purge()
+}
+
+func (c *Cache) Put(entry *Entry) error {
+ lock := locksutil.LockForKey(c.locks, entry.Key)
+ lock.Lock()
+ defer lock.Unlock()
+
+ err := c.backend.Put(entry)
+ if err == nil && !strings.HasPrefix(entry.Key, "core/") {
+ c.lru.Add(entry.Key, entry)
+ }
+ return err
+}
+
+func (c *Cache) Get(key string) (*Entry, error) {
+ lock := locksutil.LockForKey(c.locks, key)
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // We do NOT cache negative results for keys in the 'core/' prefix
+ // otherwise we risk certain race conditions upstream. The primary issue is
+ // with the HA mode, we could potentially negatively cache the leader entry
+ // and cause leader discovery to fail.
+ if strings.HasPrefix(key, "core/") {
+ return c.backend.Get(key)
+ }
+
+ // Check the LRU first
+ if raw, ok := c.lru.Get(key); ok {
+ if raw == nil {
+ return nil, nil
+ } else {
+ return raw.(*Entry), nil
+ }
+ }
+
+ // Read from the underlying backend
+ ent, err := c.backend.Get(key)
+ if err != nil {
+ return nil, err
+ }
+
+ // Cache the result
+ if ent != nil {
+ c.lru.Add(key, ent)
+ }
+
+ return ent, nil
+}
+
+func (c *Cache) Delete(key string) error {
+ lock := locksutil.LockForKey(c.locks, key)
+ lock.Lock()
+ defer lock.Unlock()
+
+ err := c.backend.Delete(key)
+ if err == nil && !strings.HasPrefix(key, "core/") {
+ c.lru.Remove(key)
+ }
+ return err
+}
+
+func (c *Cache) List(prefix string) ([]string, error) {
+ // Always pass-through as this would be difficult to cache. For the same
+ // reason we don't lock as we can't reasonably know which locks to readlock
+ // ahead of time.
+ return c.backend.List(prefix)
+}
+
+func (c *Cache) Transaction(txns []TxnEntry) error {
+ if c.transactional == nil {
+ return fmt.Errorf("physical/cache: underlying backend does not support transactions")
+ }
+
+ // Lock the world
+ for _, lock := range c.locks {
+ lock.Lock()
+ defer lock.Unlock()
+ }
+
+ if err := c.transactional.Transaction(txns); err != nil {
+ return err
+ }
+
+ for _, txn := range txns {
+ switch txn.Operation {
+ case PutOperation:
+ c.lru.Add(txn.Entry.Key, txn.Entry)
+ case DeleteOperation:
+ c.lru.Remove(txn.Entry.Key)
+ }
+ }
+
+ return nil
+}
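+
+// Hedged usage sketch (not part of upstream Vault): wrapping a backend is a
+// one-liner; after the first fetch, reads outside the deliberately uncached
+// "core/" prefix are served from the LRU.
+//
+//	inm := NewInmem(logger)
+//	cached := NewCache(inm, 0, logger) // 0 selects DefaultCacheSize
+//	_ = cached.Put(&Entry{Key: "policy/web", Value: data})
+//	entry, err := cached.Get("policy/web") // LRU hit on subsequent reads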
diff --git a/vendor/github.com/hashicorp/vault/physical/cache_test.go b/vendor/github.com/hashicorp/vault/physical/cache_test.go
new file mode 100644
index 0000000..151cf99
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/cache_test.go
@@ -0,0 +1,143 @@
+package physical
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func TestCache(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ inm := NewInmem(logger)
+ cache := NewCache(inm, 0, logger)
+ testBackend(t, cache)
+ testBackend_ListPrefix(t, cache)
+}
+
+func TestCache_Purge(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ inm := NewInmem(logger)
+ cache := NewCache(inm, 0, logger)
+
+ ent := &Entry{
+ Key: "foo",
+ Value: []byte("bar"),
+ }
+ err := cache.Put(ent)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Delete from under
+ inm.Delete("foo")
+
+ // Read should work
+ out, err := cache.Get("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("should have key")
+ }
+
+ // Clear the cache
+ cache.Purge()
+
+ // Read should fail
+ out, err = cache.Get("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("should not have key")
+ }
+}
+
+func TestCache_IgnoreCore(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ inm := NewInmem(logger)
+ cache := NewCache(inm, 0, logger)
+
+ var ent *Entry
+ var err error
+
+ // First try normal handling
+ ent = &Entry{
+ Key: "foo",
+ Value: []byte("bar"),
+ }
+ if err := cache.Put(ent); err != nil {
+ t.Fatal(err)
+ }
+ ent = &Entry{
+ Key: "foo",
+ Value: []byte("foobar"),
+ }
+ if err := inm.Put(ent); err != nil {
+ t.Fatal(err)
+ }
+ ent, err = cache.Get("foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(ent.Value) != "bar" {
+ t.Fatal("expected cached value")
+ }
+
+ // Now try core path
+ ent = &Entry{
+ Key: "core/foo",
+ Value: []byte("bar"),
+ }
+ if err := cache.Put(ent); err != nil {
+ t.Fatal(err)
+ }
+ ent = &Entry{
+ Key: "core/foo",
+ Value: []byte("foobar"),
+ }
+ if err := inm.Put(ent); err != nil {
+ t.Fatal(err)
+ }
+ ent, err = cache.Get("core/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(ent.Value) != "foobar" {
+ t.Fatal("expected cached value")
+ }
+
+ // Now make sure looked-up values aren't added
+ ent = &Entry{
+ Key: "core/zip",
+ Value: []byte("zap"),
+ }
+ if err := inm.Put(ent); err != nil {
+ t.Fatal(err)
+ }
+ ent, err = cache.Get("core/zip")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(ent.Value) != "zap" {
+ t.Fatal("expected non-cached value")
+ }
+ ent = &Entry{
+ Key: "core/zip",
+ Value: []byte("zipzap"),
+ }
+ if err := inm.Put(ent); err != nil {
+ t.Fatal(err)
+ }
+ ent, err = cache.Get("core/zip")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(ent.Value) != "zipzap" {
+ t.Fatal("expected non-cached value")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/consul.go b/vendor/github.com/hashicorp/vault/physical/consul.go
new file mode 100644
index 0000000..93aabf0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/consul.go
@@ -0,0 +1,789 @@
+package physical
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http2"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "crypto/tls"
+ "crypto/x509"
+
+ "github.com/armon/go-metrics"
+ "github.com/hashicorp/consul/api"
+ "github.com/hashicorp/consul/lib"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/go-cleanhttp"
+ multierror "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/helper/tlsutil"
+)
+
+const (
+ // checkJitterFactor specifies the jitter factor used to stagger checks
+ checkJitterFactor = 16
+
+	// checkMinBuffer provides a guarantee that a check will not
+ // be executed too close to the TTL check timeout
+ checkMinBuffer = 100 * time.Millisecond
+
+ // consulRetryInterval specifies the retry duration to use when an
+ // API call to the Consul agent fails.
+ consulRetryInterval = 1 * time.Second
+
+	// defaultCheckTimeout is the default timeout applied to TTL checks
+ defaultCheckTimeout = 5 * time.Second
+
+ // DefaultServiceName is the default Consul service name used when
+ // advertising a Vault instance.
+ DefaultServiceName = "vault"
+
+ // reconcileTimeout is how often Vault should query Consul to detect
+ // and fix any state drift.
+ reconcileTimeout = 60 * time.Second
+
+ // consistencyModeDefault is the configuration value used to tell
+ // consul to use default consistency.
+ consistencyModeDefault = "default"
+
+ // consistencyModeStrong is the configuration value used to tell
+ // consul to use strong consistency.
+ consistencyModeStrong = "strong"
+)
+
+type notifyEvent struct{}
+
+// ConsulBackend is a physical backend that stores data at specific
+// prefix within Consul. It is used for most production situations as
+// it allows Vault to run on multiple machines in a highly-available manner.
+type ConsulBackend struct {
+ path string
+ logger log.Logger
+ client *api.Client
+ kv *api.KV
+ permitPool *PermitPool
+ serviceLock sync.RWMutex
+ redirectHost string
+ redirectPort int64
+ serviceName string
+ serviceTags []string
+ disableRegistration bool
+ checkTimeout time.Duration
+ consistencyMode string
+
+ notifyActiveCh chan notifyEvent
+ notifySealedCh chan notifyEvent
+}
+
+// newConsulBackend constructs a Consul backend using the given API client
+// and the prefix in the KV store.
+func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+ // Get the path in Consul
+ path, ok := conf["path"]
+ if !ok {
+ path = "vault/"
+ }
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config path set", "path", path)
+ }
+
+ // Ensure path is suffixed but not prefixed
+ if !strings.HasSuffix(path, "/") {
+ logger.Warn("physical/consul: appending trailing forward slash to path")
+ path += "/"
+ }
+ if strings.HasPrefix(path, "/") {
+ logger.Warn("physical/consul: trimming path of its forward slash")
+ path = strings.TrimPrefix(path, "/")
+ }
+
+ // Allow admins to disable consul integration
+ disableReg, ok := conf["disable_registration"]
+ var disableRegistration bool
+ if ok && disableReg != "" {
+ b, err := strconv.ParseBool(disableReg)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing disable_registration parameter: {{err}}", err)
+ }
+ disableRegistration = b
+ }
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config disable_registration set", "disable_registration", disableRegistration)
+ }
+
+ // Get the service name to advertise in Consul
+ service, ok := conf["service"]
+ if !ok {
+ service = DefaultServiceName
+ }
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config service set", "service", service)
+ }
+
+ // Get the additional tags to attach to the registered service name
+ tags := conf["service_tags"]
+
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config service_tags set", "service_tags", tags)
+ }
+
+ checkTimeout := defaultCheckTimeout
+ checkTimeoutStr, ok := conf["check_timeout"]
+ if ok {
+ d, err := time.ParseDuration(checkTimeoutStr)
+ if err != nil {
+ return nil, err
+ }
+
+ min, _ := lib.DurationMinusBufferDomain(d, checkMinBuffer, checkJitterFactor)
+ if min < checkMinBuffer {
+ return nil, fmt.Errorf("Consul check_timeout must be greater than %v", min)
+ }
+
+ checkTimeout = d
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config check_timeout set", "check_timeout", d)
+ }
+ }
+
+ // Configure the client
+ consulConf := api.DefaultConfig()
+ // Set MaxIdleConnsPerHost to the number of processes used in expiration.Restore
+ tr := cleanhttp.DefaultPooledTransport()
+ tr.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
+ consulConf.HttpClient.Transport = tr
+
+ if addr, ok := conf["address"]; ok {
+ consulConf.Address = addr
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config address set", "address", addr)
+ }
+ }
+ if scheme, ok := conf["scheme"]; ok {
+ consulConf.Scheme = scheme
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config scheme set", "scheme", scheme)
+ }
+ }
+ if token, ok := conf["token"]; ok {
+ consulConf.Token = token
+ logger.Debug("physical/consul: config token set")
+ }
+
+ if consulConf.Scheme == "https" {
+ tlsClientConfig, err := setupTLSConfig(conf)
+ if err != nil {
+ return nil, err
+ }
+
+ transport := cleanhttp.DefaultPooledTransport()
+ transport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
+ transport.TLSClientConfig = tlsClientConfig
+ if err := http2.ConfigureTransport(transport); err != nil {
+ return nil, err
+ }
+ consulConf.HttpClient.Transport = transport
+ logger.Debug("physical/consul: configured TLS")
+ }
+
+ client, err := api.NewClient(consulConf)
+ if err != nil {
+ return nil, errwrap.Wrapf("client setup failed: {{err}}", err)
+ }
+
+ maxParStr, ok := conf["max_parallel"]
+ var maxParInt int
+ if ok {
+ maxParInt, err = strconv.Atoi(maxParStr)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ }
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: max_parallel set", "max_parallel", maxParInt)
+ }
+ }
+
+ consistencyMode, ok := conf["consistency_mode"]
+ if ok {
+ switch consistencyMode {
+ case consistencyModeDefault, consistencyModeStrong:
+ default:
+ return nil, fmt.Errorf("invalid consistency_mode value: %s", consistencyMode)
+ }
+ } else {
+ consistencyMode = consistencyModeDefault
+ }
+
+ // Setup the backend
+ c := &ConsulBackend{
+ path: path,
+ logger: logger,
+ client: client,
+ kv: client.KV(),
+ permitPool: NewPermitPool(maxParInt),
+ serviceName: service,
+ serviceTags: strutil.ParseDedupLowercaseAndSortStrings(tags, ","),
+ checkTimeout: checkTimeout,
+ disableRegistration: disableRegistration,
+ consistencyMode: consistencyMode,
+ notifyActiveCh: make(chan notifyEvent),
+ notifySealedCh: make(chan notifyEvent),
+ }
+ return c, nil
+}
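+
+// A hypothetical conf map exercising the options parsed above (values
+// are illustrative; every key is optional):
+//
+//	conf := map[string]string{
+//		"address":          "127.0.0.1:8500",
+//		"path":             "vault/",
+//		"service":          "vault",
+//		"check_timeout":    "5s",
+//		"max_parallel":     "128",
+//		"consistency_mode": "strong",
+//	}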
+
+func setupTLSConfig(conf map[string]string) (*tls.Config, error) {
+ serverName := strings.Split(conf["address"], ":")
+
+ insecureSkipVerify := false
+ if _, ok := conf["tls_skip_verify"]; ok {
+ insecureSkipVerify = true
+ }
+
+ tlsMinVersionStr, ok := conf["tls_min_version"]
+ if !ok {
+ // Set the default value
+ tlsMinVersionStr = "tls12"
+ }
+
+ tlsMinVersion, ok := tlsutil.TLSLookup[tlsMinVersionStr]
+ if !ok {
+ return nil, fmt.Errorf("invalid 'tls_min_version'")
+ }
+
+ tlsClientConfig := &tls.Config{
+ MinVersion: tlsMinVersion,
+ InsecureSkipVerify: insecureSkipVerify,
+ ServerName: serverName[0],
+ }
+
+ _, okCert := conf["tls_cert_file"]
+ _, okKey := conf["tls_key_file"]
+
+ if okCert && okKey {
+ tlsCert, err := tls.LoadX509KeyPair(conf["tls_cert_file"], conf["tls_key_file"])
+ if err != nil {
+ return nil, fmt.Errorf("client tls setup failed: %v", err)
+ }
+
+ tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
+ }
+
+ if tlsCaFile, ok := conf["tls_ca_file"]; ok {
+ caPool := x509.NewCertPool()
+
+ data, err := ioutil.ReadFile(tlsCaFile)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read CA file: %v", err)
+ }
+
+ if !caPool.AppendCertsFromPEM(data) {
+ return nil, fmt.Errorf("failed to parse CA certificate")
+ }
+
+ tlsClientConfig.RootCAs = caPool
+ }
+
+ return tlsClientConfig, nil
+}
+
+// Used to run multiple entries via a transaction
+func (c *ConsulBackend) Transaction(txns []TxnEntry) error {
+ if len(txns) == 0 {
+ return nil
+ }
+
+ ops := make([]*api.KVTxnOp, 0, len(txns))
+
+ for _, op := range txns {
+ cop := &api.KVTxnOp{
+ Key: c.path + op.Entry.Key,
+ }
+ switch op.Operation {
+ case DeleteOperation:
+ cop.Verb = api.KVDelete
+ case PutOperation:
+ cop.Verb = api.KVSet
+ cop.Value = op.Entry.Value
+ default:
+ return fmt.Errorf("%q is not a supported transaction operation", op.Operation)
+ }
+
+ ops = append(ops, cop)
+ }
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ ok, resp, _, err := c.kv.Txn(ops, nil)
+ if err != nil {
+ return err
+ }
+ if ok {
+ return nil
+ }
+
+ var retErr *multierror.Error
+ for _, res := range resp.Errors {
+ retErr = multierror.Append(retErr, errors.New(res.What))
+ }
+
+ return retErr
+}
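+
+// A hypothetical caller-side sketch: the two entries below are applied
+// atomically, or not at all.
+//
+//	err := c.Transaction([]TxnEntry{
+//		{Operation: PutOperation, Entry: &Entry{Key: "foo", Value: []byte("bar")}},
+//		{Operation: DeleteOperation, Entry: &Entry{Key: "stale"}},
+//	})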
+
+// Put is used to insert or update an entry
+func (c *ConsulBackend) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"consul", "put"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ pair := &api.KVPair{
+ Key: c.path + entry.Key,
+ Value: entry.Value,
+ }
+
+ _, err := c.kv.Put(pair, nil)
+ return err
+}
+
+// Get is used to fetch an entry
+func (c *ConsulBackend) Get(key string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"consul", "get"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ var queryOptions *api.QueryOptions
+ if c.consistencyMode == consistencyModeStrong {
+ queryOptions = &api.QueryOptions{
+ RequireConsistent: true,
+ }
+ }
+
+ pair, _, err := c.kv.Get(c.path+key, queryOptions)
+ if err != nil {
+ return nil, err
+ }
+ if pair == nil {
+ return nil, nil
+ }
+ ent := &Entry{
+ Key: key,
+ Value: pair.Value,
+ }
+ return ent, nil
+}
+
+// Delete is used to permanently delete an entry
+func (c *ConsulBackend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"consul", "delete"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ _, err := c.kv.Delete(c.path+key, nil)
+ return err
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (c *ConsulBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"consul", "list"}, time.Now())
+ scan := c.path + prefix
+
+ // The TrimPrefix call below will not work correctly if the scan path
+ // ends in "//". This can happen when a caller lists the root of a
+ // prefix in a logical backend via "/" instead of "".
+ if strings.HasSuffix(scan, "//") {
+ scan = scan[:len(scan)-1]
+ }
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ out, _, err := c.kv.Keys(scan, "/", nil)
+ for idx, val := range out {
+ out[idx] = strings.TrimPrefix(val, scan)
+ }
+
+ return out, err
+}
+
+// LockWith is used for mutual exclusion based on the given key.
+func (c *ConsulBackend) LockWith(key, value string) (Lock, error) {
+ // Create the lock
+ opts := &api.LockOptions{
+ Key: c.path + key,
+ Value: []byte(value),
+ SessionName: "Vault Lock",
+ MonitorRetries: 5,
+ }
+ lock, err := c.client.LockOpts(opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create lock: %v", err)
+ }
+ cl := &ConsulLock{
+ client: c.client,
+ key: c.path + key,
+ lock: lock,
+ consistencyMode: c.consistencyMode,
+ }
+ return cl, nil
+}
+
+// HAEnabled indicates whether the HA functionality should be exposed.
+// Currently always returns true.
+func (c *ConsulBackend) HAEnabled() bool {
+ return true
+}
+
+// DetectHostAddr is used to detect the host address by asking the Consul agent
+func (c *ConsulBackend) DetectHostAddr() (string, error) {
+ agent := c.client.Agent()
+ self, err := agent.Self()
+ if err != nil {
+ return "", err
+ }
+ addr, ok := self["Member"]["Addr"].(string)
+ if !ok {
+ return "", fmt.Errorf("Unable to convert an address to string")
+ }
+ return addr, nil
+}
+
+// ConsulLock is used to provide the Lock interface backed by Consul
+type ConsulLock struct {
+ client *api.Client
+ key string
+ lock *api.Lock
+ consistencyMode string
+}
+
+func (c *ConsulLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+ return c.lock.Lock(stopCh)
+}
+
+func (c *ConsulLock) Unlock() error {
+ return c.lock.Unlock()
+}
+
+func (c *ConsulLock) Value() (bool, string, error) {
+ kv := c.client.KV()
+
+ var queryOptions *api.QueryOptions
+ if c.consistencyMode == consistencyModeStrong {
+ queryOptions = &api.QueryOptions{
+ RequireConsistent: true,
+ }
+ }
+
+ pair, _, err := kv.Get(c.key, queryOptions)
+ if err != nil {
+ return false, "", err
+ }
+ if pair == nil {
+ return false, "", nil
+ }
+ held := pair.Session != ""
+ value := string(pair.Value)
+ return held, value, nil
+}
+
+func (c *ConsulBackend) NotifyActiveStateChange() error {
+ select {
+ case c.notifyActiveCh <- notifyEvent{}:
+ default:
+ // NOTE: If this occurs Vault's active status could be out of
+ // sync with Consul until reconcileTimer expires.
+ c.logger.Warn("physical/consul: Concurrent state change notify dropped")
+ }
+
+ return nil
+}
+
+func (c *ConsulBackend) NotifySealedStateChange() error {
+ select {
+ case c.notifySealedCh <- notifyEvent{}:
+ default:
+ // NOTE: If this occurs Vault's sealed status could be out of
+ // sync with Consul until checkTimer expires.
+ c.logger.Warn("physical/consul: Concurrent sealed state change notify dropped")
+ }
+
+ return nil
+}
+
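+// checkDuration returns the interval between TTL updates: slightly less
+// than checkTimeout, with the minimum buffer and a random jitter
+// subtracted so a fresh pass reaches the Consul agent before the
+// previous TTL expires.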
+func (c *ConsulBackend) checkDuration() time.Duration {
+ return lib.DurationMinusBuffer(c.checkTimeout, checkMinBuffer, checkJitterFactor)
+}
+
+func (c *ConsulBackend) RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc activeFunction, sealedFunc sealedFunction) (err error) {
+ if err := c.setRedirectAddr(redirectAddr); err != nil {
+ return err
+ }
+
+ // 'server' command will wait for the below goroutine to complete
+ waitGroup.Add(1)
+
+ go c.runEventDemuxer(waitGroup, shutdownCh, redirectAddr, activeFunc, sealedFunc)
+
+ return nil
+}
+
+func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc activeFunction, sealedFunc sealedFunction) {
+ // This defer statement should be executed last. So push it first.
+ defer waitGroup.Done()
+
+ // Fire the reconcileTimer immediately upon starting the event demuxer
+ reconcileTimer := time.NewTimer(0)
+ defer reconcileTimer.Stop()
+
+ // Schedule the first check. Consul TTL checks are passing by
+ // default, so checkTimer does not need to fire immediately.
+ checkTimer := time.NewTimer(c.checkDuration())
+ defer checkTimer.Stop()
+
+ // Use a reactor pattern to handle and dispatch events to singleton
+ // goroutine handlers for execution. It is not acceptable to drop
+ // inbound events from Notify*().
+ //
+ // goroutines are dispatched if the demuxer can acquire a lock (via
+ // an atomic CAS incr) on the handler. Handlers are responsible for
+ // deregistering themselves (atomic CAS decr). Handlers and the
+ // demuxer share a lock to synchronize information at the beginning
+ // and end of a handler's life (or after a handler wakes up from
+ // sleeping during a back-off/retry).
+ var shutdown bool
+ var checkLock int64
+ var registeredServiceID string
+ var serviceRegLock int64
+
+ for !shutdown {
+ select {
+ case <-c.notifyActiveCh:
+ // Run reconcile immediately upon active state change notification
+ reconcileTimer.Reset(0)
+ case <-c.notifySealedCh:
+ // Run check timer immediately upon a seal state change notification
+ checkTimer.Reset(0)
+ case <-reconcileTimer.C:
+ // Unconditionally rearm the reconcileTimer
+ reconcileTimer.Reset(reconcileTimeout - lib.RandomStagger(reconcileTimeout/checkJitterFactor))
+
+ // Abort if service discovery is disabled or a
+ // reconcile handler is already active
+ if !c.disableRegistration && atomic.CompareAndSwapInt64(&serviceRegLock, 0, 1) {
+ // Enter handler with serviceRegLock held
+ go func() {
+ defer atomic.CompareAndSwapInt64(&serviceRegLock, 1, 0)
+ for !shutdown {
+ serviceID, err := c.reconcileConsul(registeredServiceID, activeFunc, sealedFunc)
+ if err != nil {
+ if c.logger.IsWarn() {
+ c.logger.Warn("physical/consul: reconcile unable to talk with Consul backend", "error", err)
+ }
+ time.Sleep(consulRetryInterval)
+ continue
+ }
+
+ c.serviceLock.Lock()
+ defer c.serviceLock.Unlock()
+
+ registeredServiceID = serviceID
+ return
+ }
+ }()
+ }
+ case <-checkTimer.C:
+ checkTimer.Reset(c.checkDuration())
+ // Abort if service discovery is disabled or a
+ // reconcile handler is active
+ if !c.disableRegistration && atomic.CompareAndSwapInt64(&checkLock, 0, 1) {
+ // Enter handler with checkLock held
+ go func() {
+ defer atomic.CompareAndSwapInt64(&checkLock, 1, 0)
+ for !shutdown {
+ sealed := sealedFunc()
+ if err := c.runCheck(sealed); err != nil {
+ if c.logger.IsWarn() {
+ c.logger.Warn("physical/consul: check unable to talk with Consul backend", "error", err)
+ }
+ time.Sleep(consulRetryInterval)
+ continue
+ }
+ return
+ }
+ }()
+ }
+ case <-shutdownCh:
+ c.logger.Info("physical/consul: Shutting down consul backend")
+ shutdown = true
+ }
+ }
+
+ c.serviceLock.RLock()
+ defer c.serviceLock.RUnlock()
+ if err := c.client.Agent().ServiceDeregister(registeredServiceID); err != nil {
+ if c.logger.IsWarn() {
+ c.logger.Warn("physical/consul: service deregistration failed", "error", err)
+ }
+ }
+}
+
+// checkID returns the ID used for a Consul Check. Assumes at least a read
+// lock is held.
+func (c *ConsulBackend) checkID() string {
+ return fmt.Sprintf("%s:vault-sealed-check", c.serviceID())
+}
+
+// serviceID returns the Vault ServiceID for use in Consul. Assumes at
+// least a read lock is held.
+func (c *ConsulBackend) serviceID() string {
+ return fmt.Sprintf("%s:%s:%d", c.serviceName, c.redirectHost, c.redirectPort)
+}
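+
+// For example, with the default service name and a redirect address of
+// http://127.0.0.1:8200, serviceID yields "vault:127.0.0.1:8200".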
+
+// reconcileConsul queries the state of Vault Core and Consul and fixes up
+// Consul's state according to what's in Vault. reconcileConsul is called
+// without any locks held and can be run concurrently, therefore no changes
+// to ConsulBackend can be made in this method (a const receiver would
+// give compiler-enforced safety here, but Go has no such construct).
+func (c *ConsulBackend) reconcileConsul(registeredServiceID string, activeFunc activeFunction, sealedFunc sealedFunction) (serviceID string, err error) {
+ // Query vault Core for its current state
+ active := activeFunc()
+ sealed := sealedFunc()
+
+ agent := c.client.Agent()
+ catalog := c.client.Catalog()
+
+ serviceID = c.serviceID()
+
+ // Get the current state of Vault from Consul
+ var currentVaultService *api.CatalogService
+ if services, _, err := catalog.Service(c.serviceName, "", &api.QueryOptions{AllowStale: true}); err == nil {
+ for _, service := range services {
+ if serviceID == service.ServiceID {
+ currentVaultService = service
+ break
+ }
+ }
+ }
+
+ tags := c.fetchServiceTags(active)
+
+ var reregister bool
+
+ switch {
+ case currentVaultService == nil, registeredServiceID == "":
+ reregister = true
+ default:
+ switch {
+ case !strutil.EquivalentSlices(currentVaultService.ServiceTags, tags):
+ reregister = true
+ }
+ }
+
+ if !reregister {
+ // When re-registration is not required, return a valid serviceID
+ // to avoid registration in the next cycle.
+ return serviceID, nil
+ }
+
+ service := &api.AgentServiceRegistration{
+ ID: serviceID,
+ Name: c.serviceName,
+ Tags: tags,
+ Port: int(c.redirectPort),
+ Address: c.redirectHost,
+ EnableTagOverride: false,
+ }
+
+ checkStatus := api.HealthCritical
+ if !sealed {
+ checkStatus = api.HealthPassing
+ }
+
+ sealedCheck := &api.AgentCheckRegistration{
+ ID: c.checkID(),
+ Name: "Vault Sealed Status",
+ Notes: "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
+ ServiceID: serviceID,
+ AgentServiceCheck: api.AgentServiceCheck{
+ TTL: c.checkTimeout.String(),
+ Status: checkStatus,
+ },
+ }
+
+ if err := agent.ServiceRegister(service); err != nil {
+ return "", errwrap.Wrapf(`service registration failed: {{err}}`, err)
+ }
+
+ if err := agent.CheckRegister(sealedCheck); err != nil {
+ return serviceID, errwrap.Wrapf(`service check registration failed: {{err}}`, err)
+ }
+
+ return serviceID, nil
+}
+
+// runCheck immediately pushes a TTL check.
+func (c *ConsulBackend) runCheck(sealed bool) error {
+ // Run a TTL check
+ agent := c.client.Agent()
+ if !sealed {
+ return agent.PassTTL(c.checkID(), "Vault Unsealed")
+ }
+ return agent.FailTTL(c.checkID(), "Vault Sealed")
+}
+
+// fetchServiceTags returns all of the relevant tags for Consul.
+func (c *ConsulBackend) fetchServiceTags(active bool) []string {
+ activeTag := "standby"
+ if active {
+ activeTag = "active"
+ }
+ return append(c.serviceTags, activeTag)
+}
+
+func (c *ConsulBackend) setRedirectAddr(addr string) (err error) {
+ if addr == "" {
+ return fmt.Errorf("redirect address must not be empty")
+ }
+
+ url, err := url.Parse(addr)
+ if err != nil {
+ return errwrap.Wrapf(fmt.Sprintf(`failed to parse redirect URL "%v": {{err}}`, addr), err)
+ }
+
+ var portStr string
+ c.redirectHost, portStr, err = net.SplitHostPort(url.Host)
+ if err != nil {
+ if url.Scheme == "http" {
+ portStr = "80"
+ } else if url.Scheme == "https" {
+ portStr = "443"
+ } else if url.Scheme == "unix" {
+ portStr = "-1"
+ c.redirectHost = url.Path
+ } else {
+ return errwrap.Wrapf(fmt.Sprintf(`failed to find a host:port in redirect address "%v": {{err}}`, url.Host), err)
+ }
+ }
+ c.redirectPort, err = strconv.ParseInt(portStr, 10, 0)
+ if err != nil || c.redirectPort < -1 || c.redirectPort > 65535 {
+ return errwrap.Wrapf(fmt.Sprintf(`failed to parse valid port "%v": {{err}}`, portStr), err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/consul_test.go b/vendor/github.com/hashicorp/vault/physical/consul_test.go
new file mode 100644
index 0000000..59b1294
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/consul_test.go
@@ -0,0 +1,540 @@
+package physical
+
+import (
+ "fmt"
+ "math/rand"
+ "os"
+ "reflect"
+ "sync"
+ "testing"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/consul/api"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/helper/strutil"
+ dockertest "gopkg.in/ory-am/dockertest.v2"
+)
+
+type consulConf map[string]string
+
+var (
+ addrCount int = 0
+ testImagePull sync.Once
+)
+
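+// testHostIP hands out sequential loopback addresses for tests; note
+// that the counter starts at zero, so the first call returns
+// "127.0.0.0".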
+func testHostIP() string {
+ a := addrCount
+ addrCount++
+ return fmt.Sprintf("127.0.0.%d", a)
+}
+
+func testConsulBackend(t *testing.T) *ConsulBackend {
+ return testConsulBackendConfig(t, &consulConf{})
+}
+
+func testConsulBackendConfig(t *testing.T, conf *consulConf) *ConsulBackend {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ be, err := newConsulBackend(*conf, logger)
+ if err != nil {
+ t.Fatalf("Expected Consul to initialize: %v", err)
+ }
+
+ c, ok := be.(*ConsulBackend)
+ if !ok {
+ t.Fatalf("Expected ConsulBackend")
+ }
+
+ return c
+}
+
+func testConsul_testConsulBackend(t *testing.T) {
+ c := testConsulBackend(t)
+ if c == nil {
+ t.Fatalf("bad")
+ }
+}
+
+func testActiveFunc(activePct float64) activeFunction {
+ return func() bool {
+ var active bool
+ standbyProb := rand.Float64()
+ if standbyProb > activePct {
+ active = true
+ }
+ return active
+ }
+}
+
+func testSealedFunc(sealedPct float64) sealedFunction {
+ return func() bool {
+ var sealed bool
+ unsealedProb := rand.Float64()
+ if unsealedProb > sealedPct {
+ sealed = true
+ }
+ return sealed
+ }
+}
+
+func TestConsul_ServiceTags(t *testing.T) {
+ consulConfig := map[string]string{
+ "path": "seaTech/",
+ "service": "astronomy",
+ "service_tags": "deadbeef, cafeefac, deadc0de, feedface",
+ "redirect_addr": "http://127.0.0.2:8200",
+ "check_timeout": "6s",
+ "address": "127.0.0.2",
+ "scheme": "https",
+ "token": "deadbeef-cafeefac-deadc0de-feedface",
+ "max_parallel": "4",
+ "disable_registration": "false",
+ }
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ be, err := newConsulBackend(consulConfig, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ c, ok := be.(*ConsulBackend)
+ if !ok {
+ t.Fatalf("failed to create physical Consul backend")
+ }
+
+ expected := []string{"deadbeef", "cafeefac", "deadc0de", "feedface"}
+ actual := c.fetchServiceTags(false)
+ if !strutil.EquivalentSlices(actual, append(expected, "standby")) {
+ t.Fatalf("bad: expected:%s actual:%s", append(expected, "standby"), actual)
+ }
+
+ actual = c.fetchServiceTags(true)
+ if !strutil.EquivalentSlices(actual, append(expected, "active")) {
+ t.Fatalf("bad: expected:%s actual:%s", append(expected, "active"), actual)
+ }
+}
+
+func TestConsul_newConsulBackend(t *testing.T) {
+ tests := []struct {
+ name string
+ consulConfig map[string]string
+ fail bool
+ redirectAddr string
+ checkTimeout time.Duration
+ path string
+ service string
+ address string
+ scheme string
+ token string
+ max_parallel int
+ disableReg bool
+ consistencyMode string
+ }{
+ {
+ name: "Valid default config",
+ consulConfig: map[string]string{},
+ checkTimeout: 5 * time.Second,
+ redirectAddr: "http://127.0.0.1:8200",
+ path: "vault/",
+ service: "vault",
+ address: "127.0.0.1:8500",
+ scheme: "http",
+ token: "",
+ max_parallel: 4,
+ disableReg: false,
+ consistencyMode: "default",
+ },
+ {
+ name: "Valid modified config",
+ consulConfig: map[string]string{
+ "path": "seaTech/",
+ "service": "astronomy",
+ "redirect_addr": "http://127.0.0.2:8200",
+ "check_timeout": "6s",
+ "address": "127.0.0.2",
+ "scheme": "https",
+ "token": "deadbeef-cafeefac-deadc0de-feedface",
+ "max_parallel": "4",
+ "disable_registration": "false",
+ "consistency_mode": "strong",
+ },
+ checkTimeout: 6 * time.Second,
+ path: "seaTech/",
+ service: "astronomy",
+ redirectAddr: "http://127.0.0.2:8200",
+ address: "127.0.0.2",
+ scheme: "https",
+ token: "deadbeef-cafeefac-deadc0de-feedface",
+ max_parallel: 4,
+ consistencyMode: "strong",
+ },
+ {
+ name: "check timeout too short",
+ fail: true,
+ consulConfig: map[string]string{
+ "check_timeout": "99ms",
+ },
+ },
+ }
+
+ for _, test := range tests {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ be, err := newConsulBackend(test.consulConfig, logger)
+ if test.fail {
+ if err == nil {
+ t.Fatalf(`Expected config "%s" to fail`, test.name)
+ } else {
+ continue
+ }
+ } else if !test.fail && err != nil {
+ t.Fatalf("Expected config %s to not fail: %v", test.name, err)
+ }
+
+ c, ok := be.(*ConsulBackend)
+ if !ok {
+ t.Fatalf("Expected ConsulBackend: %s", test.name)
+ }
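+ // Registration is forced off here so the config assertions below run
+ // without a live Consul agent; the CONSUL_HTTP_ADDR check that
+ // follows is consequently never taken.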
+ c.disableRegistration = true
+
+ if c.disableRegistration == false {
+ addr := os.Getenv("CONSUL_HTTP_ADDR")
+ if addr == "" {
+ continue
+ }
+ }
+
+ var shutdownCh ShutdownChannel
+ waitGroup := &sync.WaitGroup{}
+ if err := c.RunServiceDiscovery(waitGroup, shutdownCh, test.redirectAddr, testActiveFunc(0.5), testSealedFunc(0.5)); err != nil {
+ t.Fatalf("bad: %v", err)
+ }
+
+ if test.checkTimeout != c.checkTimeout {
+ t.Errorf("bad: %v != %v", test.checkTimeout, c.checkTimeout)
+ }
+
+ if test.path != c.path {
+ t.Errorf("bad: %s %v != %v", test.name, test.path, c.path)
+ }
+
+ if test.service != c.serviceName {
+ t.Errorf("bad: %v != %v", test.service, c.serviceName)
+ }
+
+ if test.consistencyMode != c.consistencyMode {
+ t.Errorf("bad consistency_mode value: %v != %v", test.consistencyMode, c.consistencyMode)
+ }
+
+ // FIXME(sean@): Unable to test max_parallel
+ // if test.max_parallel != cap(c.permitPool) {
+ // t.Errorf("bad: %v != %v", test.max_parallel, cap(c.permitPool))
+ // }
+ }
+}
+
+func TestConsul_serviceTags(t *testing.T) {
+ tests := []struct {
+ active bool
+ tags []string
+ }{
+ {
+ active: true,
+ tags: []string{"active"},
+ },
+ {
+ active: false,
+ tags: []string{"standby"},
+ },
+ }
+
+ c := testConsulBackend(t)
+
+ for _, test := range tests {
+ tags := c.fetchServiceTags(test.active)
+ if !reflect.DeepEqual(tags[:], test.tags[:]) {
+ t.Errorf("Bad %v: %v %v", test.active, tags, test.tags)
+ }
+ }
+}
+
+func TestConsul_setRedirectAddr(t *testing.T) {
+ tests := []struct {
+ addr string
+ host string
+ port int64
+ pass bool
+ }{
+ {
+ addr: "http://127.0.0.1:8200/",
+ host: "127.0.0.1",
+ port: 8200,
+ pass: true,
+ },
+ {
+ addr: "http://127.0.0.1:8200",
+ host: "127.0.0.1",
+ port: 8200,
+ pass: true,
+ },
+ {
+ addr: "https://127.0.0.1:8200",
+ host: "127.0.0.1",
+ port: 8200,
+ pass: true,
+ },
+ {
+ addr: "unix:///tmp/.vault.addr.sock",
+ host: "/tmp/.vault.addr.sock",
+ port: -1,
+ pass: true,
+ },
+ {
+ addr: "127.0.0.1:8200",
+ pass: false,
+ },
+ {
+ addr: "127.0.0.1",
+ pass: false,
+ },
+ }
+ for _, test := range tests {
+ c := testConsulBackend(t)
+ err := c.setRedirectAddr(test.addr)
+ if test.pass {
+ if err != nil {
+ t.Fatalf("bad: %v", err)
+ }
+ } else {
+ if err == nil {
+ t.Fatalf("bad, expected fail")
+ } else {
+ continue
+ }
+ }
+
+ if c.redirectHost != test.host {
+ t.Fatalf("bad: %v != %v", c.redirectHost, test.host)
+ }
+
+ if c.redirectPort != test.port {
+ t.Fatalf("bad: %v != %v", c.redirectPort, test.port)
+ }
+ }
+}
+
+func TestConsul_NotifyActiveStateChange(t *testing.T) {
+ c := testConsulBackend(t)
+
+ if err := c.NotifyActiveStateChange(); err != nil {
+ t.Fatalf("bad: %v", err)
+ }
+}
+
+func TestConsul_NotifySealedStateChange(t *testing.T) {
+ c := testConsulBackend(t)
+
+ if err := c.NotifySealedStateChange(); err != nil {
+ t.Fatalf("bad: %v", err)
+ }
+}
+
+func TestConsul_serviceID(t *testing.T) {
+ passingTests := []struct {
+ name string
+ redirectAddr string
+ serviceName string
+ expected string
+ }{
+ {
+ name: "valid host w/o slash",
+ redirectAddr: "http://127.0.0.1:8200",
+ serviceName: "sea-tech-astronomy",
+ expected: "sea-tech-astronomy:127.0.0.1:8200",
+ },
+ {
+ name: "valid host w/ slash",
+ redirectAddr: "http://127.0.0.1:8200/",
+ serviceName: "sea-tech-astronomy",
+ expected: "sea-tech-astronomy:127.0.0.1:8200",
+ },
+ {
+ name: "valid https host w/ slash",
+ redirectAddr: "https://127.0.0.1:8200/",
+ serviceName: "sea-tech-astronomy",
+ expected: "sea-tech-astronomy:127.0.0.1:8200",
+ },
+ }
+
+ for _, test := range passingTests {
+ c := testConsulBackendConfig(t, &consulConf{
+ "service": test.serviceName,
+ })
+
+ if err := c.setRedirectAddr(test.redirectAddr); err != nil {
+ t.Fatalf("bad: %s %v", test.name, err)
+ }
+
+ serviceID := c.serviceID()
+ if serviceID != test.expected {
+ t.Fatalf("bad: %v != %v", serviceID, test.expected)
+ }
+ }
+}
+
+func TestConsulBackend(t *testing.T) {
+ var token string
+ addr := os.Getenv("CONSUL_HTTP_ADDR")
+ if addr == "" {
+ cid, connURL := prepareTestContainer(t)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ addr = connURL
+ token = dockertest.ConsulACLMasterToken
+ }
+
+ conf := api.DefaultConfig()
+ conf.Address = addr
+ conf.Token = token
+ client, err := api.NewClient(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
+ defer func() {
+ client.KV().DeleteTree(randPath, nil)
+ }()
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("consul", logger, map[string]string{
+ "address": conf.Address,
+ "path": randPath,
+ "max_parallel": "256",
+ "token": conf.Token,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ testBackend(t, b)
+ testBackend_ListPrefix(t, b)
+}
+
+func TestConsulHABackend(t *testing.T) {
+ var token string
+ addr := os.Getenv("CONSUL_HTTP_ADDR")
+ if addr == "" {
+ cid, connURL := prepareTestContainer(t)
+ if cid != "" {
+ defer cleanupTestContainer(t, cid)
+ }
+ addr = connURL
+ token = dockertest.ConsulACLMasterToken
+ }
+
+ conf := api.DefaultConfig()
+ conf.Address = addr
+ conf.Token = token
+ client, err := api.NewClient(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
+ defer func() {
+ client.KV().DeleteTree(randPath, nil)
+ }()
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("consul", logger, map[string]string{
+ "address": conf.Address,
+ "path": randPath,
+ "max_parallel": "-1",
+ "token": conf.Token,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ ha, ok := b.(HABackend)
+ if !ok {
+ t.Fatalf("consul does not implement HABackend")
+ }
+ testHABackend(t, ha, ha)
+
+ detect, ok := b.(RedirectDetect)
+ if !ok {
+ t.Fatalf("consul does not implement RedirectDetect")
+ }
+ host, err := detect.DetectHostAddr()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if host == "" {
+ t.Fatalf("bad addr: %v", host)
+ }
+}
+
+func prepareTestContainer(t *testing.T) (cid dockertest.ContainerID, retAddress string) {
+ if os.Getenv("CONSUL_HTTP_ADDR") != "" {
+ return "", os.Getenv("CONSUL_HTTP_ADDR")
+ }
+
+ // Without this, the checks for whether the container has started never
+ // seem to pass. There's really no reason to expose the test
+ // containers, so don't.
+ dockertest.BindDockerToLocalhost = "yep"
+
+ testImagePull.Do(func() {
+ dockertest.Pull(dockertest.ConsulImageName)
+ })
+
+ try := 0
+ cid, connErr := dockertest.ConnectToConsul(60, 500*time.Millisecond, func(connAddress string) bool {
+ try++
+ // Build a client and verify that the credentials work
+ config := api.DefaultConfig()
+ config.Address = connAddress
+ config.Token = dockertest.ConsulACLMasterToken
+ client, err := api.NewClient(config)
+ if err != nil {
+ if try > 50 {
+ panic(err)
+ }
+ return false
+ }
+
+ _, err = client.KV().Put(&api.KVPair{
+ Key: "setuptest",
+ Value: []byte("setuptest"),
+ }, nil)
+ if err != nil {
+ if try > 50 {
+ panic(err)
+ }
+ return false
+ }
+
+ retAddress = connAddress
+ return true
+ })
+
+ if connErr != nil {
+ t.Fatalf("could not connect to consul: %v", connErr)
+ }
+
+ return
+}
+
+func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
+ err := cid.KillRemove()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/dynamodb.go b/vendor/github.com/hashicorp/vault/physical/dynamodb.go
new file mode 100644
index 0000000..4c7cefb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/dynamodb.go
@@ -0,0 +1,745 @@
+package physical
+
+import (
+ "fmt"
+ "math"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/armon/go-metrics"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+ "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/go-uuid"
+)
+
+const (
+ // DefaultDynamoDBRegion is used when no region is configured
+ // explicitly.
+ DefaultDynamoDBRegion = "us-east-1"
+ // DefaultDynamoDBTableName is used when no table name
+ // is configured explicitly.
+ DefaultDynamoDBTableName = "vault-dynamodb-backend"
+
+ // DefaultDynamoDBReadCapacity is the default read capacity
+ // that is used when none is configured explicitly.
+ DefaultDynamoDBReadCapacity = 5
+ // DefaultDynamoDBWriteCapacity is the default write capacity
+ // that is used when none is configured explicitly.
+ DefaultDynamoDBWriteCapacity = 5
+
+ // DynamoDBEmptyPath is the string that is used instead of
+ // empty strings when stored in DynamoDB.
+ DynamoDBEmptyPath = " "
+ // DynamoDBLockPrefix is the prefix used to mark DynamoDB records
+ // as locks. This prefix causes them not to be returned by
+ // List operations.
+ DynamoDBLockPrefix = "_"
+
+ // The lock TTL matches the default that the Consul API uses, 15 seconds.
+ DynamoDBLockTTL = 15 * time.Second
+
+ // The amount of time to wait between the lock renewals
+ DynamoDBLockRenewInterval = 5 * time.Second
+
+ // DynamoDBLockRetryInterval is the amount of time to wait
+ // if a lock fails before trying again.
+ DynamoDBLockRetryInterval = time.Second
+ // DynamoDBWatchRetryMax is the number of times to re-try a
+ // failed watch before signaling that leadership is lost.
+ DynamoDBWatchRetryMax = 5
+ // DynamoDBWatchRetryInterval is the amount of time to wait
+ // if a watch fails before trying again.
+ DynamoDBWatchRetryInterval = 5 * time.Second
+)
+
+// DynamoDBBackend is a physical backend that stores data in
+// a DynamoDB table. It can be run in high-availability mode
+// as DynamoDB has locking capabilities.
+type DynamoDBBackend struct {
+ table string
+ client *dynamodb.DynamoDB
+ recovery bool
+ logger log.Logger
+ haEnabled bool
+ permitPool *PermitPool
+}
+
+// DynamoDBRecord is the representation of a vault entry in
+// DynamoDB. The vault key is split up into two components
+// (Path and Key) in order to allow more efficient listings.
+type DynamoDBRecord struct {
+ Path string
+ Key string
+ Value []byte
+}
+
+// DynamoDBLock implements a lock using a DynamoDB client.
+type DynamoDBLock struct {
+ backend *DynamoDBBackend
+ value, key string
+ identity string
+ held bool
+ lock sync.Mutex
+ recovery bool
+ // Allow modifying the Lock durations for ease of unit testing.
+ renewInterval time.Duration
+ ttl time.Duration
+ watchRetryInterval time.Duration
+}
+
+type DynamoDBLockRecord struct {
+ Path string
+ Key string
+ Value []byte
+ Identity []byte
+ Expires int64
+}
+
+// newDynamoDBBackend constructs a DynamoDB backend. If the
+// configured DynamoDB table does not exist, it creates it.
+func newDynamoDBBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+ table := os.Getenv("AWS_DYNAMODB_TABLE")
+ if table == "" {
+ table = conf["table"]
+ if table == "" {
+ table = DefaultDynamoDBTableName
+ }
+ }
+ readCapacityString := os.Getenv("AWS_DYNAMODB_READ_CAPACITY")
+ if readCapacityString == "" {
+ readCapacityString = conf["read_capacity"]
+ if readCapacityString == "" {
+ readCapacityString = "0"
+ }
+ }
+ readCapacity, err := strconv.Atoi(readCapacityString)
+ if err != nil {
+ return nil, fmt.Errorf("invalid read capacity: %s", readCapacityString)
+ }
+ if readCapacity == 0 {
+ readCapacity = DefaultDynamoDBReadCapacity
+ }
+
+ writeCapacityString := os.Getenv("AWS_DYNAMODB_WRITE_CAPACITY")
+ if writeCapacityString == "" {
+ writeCapacityString = conf["write_capacity"]
+ if writeCapacityString == "" {
+ writeCapacityString = "0"
+ }
+ }
+ writeCapacity, err := strconv.Atoi(writeCapacityString)
+ if err != nil {
+ return nil, fmt.Errorf("invalid write capacity: %s", writeCapacityString)
+ }
+ if writeCapacity == 0 {
+ writeCapacity = DefaultDynamoDBWriteCapacity
+ }
+
+ accessKey := os.Getenv("AWS_ACCESS_KEY_ID")
+ if accessKey == "" {
+ accessKey = conf["access_key"]
+ }
+ secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secretKey == "" {
+ secretKey = conf["secret_key"]
+ }
+ sessionToken := os.Getenv("AWS_SESSION_TOKEN")
+ if sessionToken == "" {
+ sessionToken = conf["session_token"]
+ }
+
+ endpoint := os.Getenv("AWS_DYNAMODB_ENDPOINT")
+ if endpoint == "" {
+ endpoint = conf["endpoint"]
+ }
+ region := os.Getenv("AWS_DEFAULT_REGION")
+ if region == "" {
+ region = conf["region"]
+ if region == "" {
+ region = DefaultDynamoDBRegion
+ }
+ }
+
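+ // Credentials are resolved in order: statically configured keys first,
+ // then the process environment, then the shared credentials file, and
+ // finally the EC2 instance role.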
+ creds := credentials.NewChainCredentials([]credentials.Provider{
+ &credentials.StaticProvider{Value: credentials.Value{
+ AccessKeyID: accessKey,
+ SecretAccessKey: secretKey,
+ SessionToken: sessionToken,
+ }},
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
+ })
+
+ awsConf := aws.NewConfig().
+ WithCredentials(creds).
+ WithRegion(region).
+ WithEndpoint(endpoint)
+ client := dynamodb.New(session.New(awsConf))
+
+ if err := ensureTableExists(client, table, readCapacity, writeCapacity); err != nil {
+ return nil, err
+ }
+
+ haEnabled := os.Getenv("DYNAMODB_HA_ENABLED")
+ if haEnabled == "" {
+ haEnabled = conf["ha_enabled"]
+ }
+ haEnabledBool, _ := strconv.ParseBool(haEnabled)
+
+ recoveryMode := os.Getenv("RECOVERY_MODE")
+ if recoveryMode == "" {
+ recoveryMode = conf["recovery_mode"]
+ }
+ recoveryModeBool, _ := strconv.ParseBool(recoveryMode)
+
+ maxParStr, ok := conf["max_parallel"]
+ var maxParInt int
+ if ok {
+ maxParInt, err = strconv.Atoi(maxParStr)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ }
+ if logger.IsDebug() {
+ logger.Debug("physical/dynamodb: max_parallel set", "max_parallel", maxParInt)
+ }
+ }
+
+ return &DynamoDBBackend{
+ table: table,
+ client: client,
+ permitPool: NewPermitPool(maxParInt),
+ recovery: recoveryModeBool,
+ haEnabled: haEnabledBool,
+ logger: logger,
+ }, nil
+}
+
+// Put is used to insert or update an entry
+func (d *DynamoDBBackend) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"dynamodb", "put"}, time.Now())
+
+ record := DynamoDBRecord{
+ Path: recordPathForVaultKey(entry.Key),
+ Key: recordKeyForVaultKey(entry.Key),
+ Value: entry.Value,
+ }
+ item, err := dynamodbattribute.ConvertToMap(record)
+ if err != nil {
+ return fmt.Errorf("could not convert prefix record to DynamoDB item: %s", err)
+ }
+ requests := []*dynamodb.WriteRequest{{
+ PutRequest: &dynamodb.PutRequest{
+ Item: item,
+ },
+ }}
+
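+ // Also write an empty "folder" record for each parent prefix of the
+ // key so that List can discover intermediate directories.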
+ for _, prefix := range prefixes(entry.Key) {
+ record = DynamoDBRecord{
+ Path: recordPathForVaultKey(prefix),
+ Key: fmt.Sprintf("%s/", recordKeyForVaultKey(prefix)),
+ }
+ item, err := dynamodbattribute.ConvertToMap(record)
+ if err != nil {
+ return fmt.Errorf("could not convert prefix record to DynamoDB item: %s", err)
+ }
+ requests = append(requests, &dynamodb.WriteRequest{
+ PutRequest: &dynamodb.PutRequest{
+ Item: item,
+ },
+ })
+ }
+
+ return d.batchWriteRequests(requests)
+}
+
+// Get is used to fetch an entry
+func (d *DynamoDBBackend) Get(key string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"dynamodb", "get"}, time.Now())
+
+ d.permitPool.Acquire()
+ defer d.permitPool.Release()
+
+ resp, err := d.client.GetItem(&dynamodb.GetItemInput{
+ TableName: aws.String(d.table),
+ ConsistentRead: aws.Bool(true),
+ Key: map[string]*dynamodb.AttributeValue{
+ "Path": {S: aws.String(recordPathForVaultKey(key))},
+ "Key": {S: aws.String(recordKeyForVaultKey(key))},
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ if resp.Item == nil {
+ return nil, nil
+ }
+
+ record := &DynamoDBRecord{}
+ if err := dynamodbattribute.ConvertFromMap(resp.Item, record); err != nil {
+ return nil, err
+ }
+
+ return &Entry{
+ Key: vaultKey(record),
+ Value: record.Value,
+ }, nil
+}
+
+// Delete is used to permanently delete an entry
+func (d *DynamoDBBackend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"dynamodb", "delete"}, time.Now())
+
+ requests := []*dynamodb.WriteRequest{{
+ DeleteRequest: &dynamodb.DeleteRequest{
+ Key: map[string]*dynamodb.AttributeValue{
+ "Path": {S: aws.String(recordPathForVaultKey(key))},
+ "Key": {S: aws.String(recordKeyForVaultKey(key))},
+ },
+ },
+ }}
+
+ // clean up now empty 'folders'
+ prefixes := prefixes(key)
+ sort.Sort(sort.Reverse(sort.StringSlice(prefixes)))
+ for _, prefix := range prefixes {
+ items, err := d.List(prefix)
+ if err != nil {
+ return err
+ }
+ if len(items) == 1 {
+ requests = append(requests, &dynamodb.WriteRequest{
+ DeleteRequest: &dynamodb.DeleteRequest{
+ Key: map[string]*dynamodb.AttributeValue{
+ "Path": {S: aws.String(recordPathForVaultKey(prefix))},
+ "Key": {S: aws.String(fmt.Sprintf("%s/", recordKeyForVaultKey(prefix)))},
+ },
+ },
+ })
+ }
+ }
+
+ return d.batchWriteRequests(requests)
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (d *DynamoDBBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"dynamodb", "list"}, time.Now())
+
+ prefix = strings.TrimSuffix(prefix, "/")
+
+ keys := []string{}
+ prefix = escapeEmptyPath(prefix)
+ queryInput := &dynamodb.QueryInput{
+ TableName: aws.String(d.table),
+ ConsistentRead: aws.Bool(true),
+ KeyConditions: map[string]*dynamodb.Condition{
+ "Path": {
+ ComparisonOperator: aws.String("EQ"),
+ AttributeValueList: []*dynamodb.AttributeValue{{
+ S: aws.String(prefix),
+ }},
+ },
+ },
+ }
+
+ d.permitPool.Acquire()
+ defer d.permitPool.Release()
+
+ err := d.client.QueryPages(queryInput, func(out *dynamodb.QueryOutput, lastPage bool) bool {
+ var record DynamoDBRecord
+ for _, item := range out.Items {
+ dynamodbattribute.ConvertFromMap(item, &record)
+ if !strings.HasPrefix(record.Key, DynamoDBLockPrefix) {
+ keys = append(keys, record.Key)
+ }
+ }
+ return !lastPage
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return keys, nil
+}
+
+// LockWith is used for mutual exclusion based on the given key.
+func (d *DynamoDBBackend) LockWith(key, value string) (Lock, error) {
+ identity, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+ return &DynamoDBLock{
+ backend: d,
+ key: filepath.Join(filepath.Dir(key), DynamoDBLockPrefix+filepath.Base(key)),
+ value: value,
+ identity: identity,
+ recovery: d.recovery,
+ renewInterval: DynamoDBLockRenewInterval,
+ ttl: DynamoDBLockTTL,
+ watchRetryInterval: DynamoDBWatchRetryInterval,
+ }, nil
+}
+
+func (d *DynamoDBBackend) HAEnabled() bool {
+ return d.haEnabled
+}
+
+// batchWriteRequests takes a list of write requests and executes them in
+// batches with a maximum size of 25, which is the limit of BatchWriteItem.
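+// For example, 60 queued requests go out as batches of 25, 25, and 10.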
+func (d *DynamoDBBackend) batchWriteRequests(requests []*dynamodb.WriteRequest) error {
+ for len(requests) > 0 {
+ batchSize := int(math.Min(float64(len(requests)), 25))
+ batch := requests[:batchSize]
+ requests = requests[batchSize:]
+
+ d.permitPool.Acquire()
+ _, err := d.client.BatchWriteItem(&dynamodb.BatchWriteItemInput{
+ RequestItems: map[string][]*dynamodb.WriteRequest{
+ d.table: batch,
+ },
+ })
+ d.permitPool.Release()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Lock tries to acquire the lock by repeatedly trying to create
+// a record in the DynamoDB table. It will block until either the
+// stop channel is closed or the lock could be acquired successfully.
+// The returned channel will be closed once the lock is deleted or
+// changed in the DynamoDB table.
+func (l *DynamoDBLock) Lock(stopCh <-chan struct{}) (doneCh <-chan struct{}, retErr error) {
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ if l.held {
+ return nil, fmt.Errorf("lock already held")
+ }
+
+ done := make(chan struct{})
+ // close done channel even in case of error
+ defer func() {
+ if retErr != nil {
+ close(done)
+ }
+ }()
+
+ var (
+ stop = make(chan struct{})
+ success = make(chan struct{})
+ errors = make(chan error)
+ leader = make(chan struct{})
+ )
+ // try to acquire the lock asynchronously
+ go l.tryToLock(stop, success, errors)
+
+ select {
+ case <-success:
+ l.held = true
+ // after acquiring it successfully, we must renew the lock periodically,
+ // and watch the lock in order to close the leader channel
+ // once it is lost.
+ go l.periodicallyRenewLock(leader)
+ go l.watch(leader)
+ case retErr = <-errors:
+ close(stop)
+ return nil, retErr
+ case <-stopCh:
+ close(stop)
+ return nil, nil
+ }
+
+ return leader, retErr
+}
+
+// Unlock releases the lock by deleting the lock record from the
+// DynamoDB table.
+func (l *DynamoDBLock) Unlock() error {
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ if !l.held {
+ return nil
+ }
+
+ l.held = false
+ if err := l.backend.Delete(l.key); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Value checks whether or not the lock is held by any instance of DynamoDBLock,
+// including this one, and returns the current value.
+func (l *DynamoDBLock) Value() (bool, string, error) {
+ entry, err := l.backend.Get(l.key)
+ if err != nil {
+ return false, "", err
+ }
+ if entry == nil {
+ return false, "", nil
+ }
+
+ return true, string(entry.Value), nil
+}
+
+// tryToLock tries to create a new item in DynamoDB
+// every `DynamoDBLockRetryInterval`. As long as the item
+// cannot be created (because it already exists), it will
+// be retried. If the operation fails due to an error, it
+// is sent to the errors channel.
+// Once the lock has been acquired successfully, the success
+// channel is closed.
+func (l *DynamoDBLock) tryToLock(stop, success chan struct{}, errors chan error) {
+ ticker := time.NewTicker(DynamoDBLockRetryInterval)
+
+ for {
+ select {
+ case <-stop:
+ ticker.Stop()
+ return
+ case <-ticker.C:
+ err := l.writeItem()
+ if err != nil {
+ if err, ok := err.(awserr.Error); ok {
+ // Don't report a condition check failure, this means that the lock
+ // is already being held.
+ if err.Code() != dynamodb.ErrCodeConditionalCheckFailedException {
+ errors <- err
+ }
+ } else {
+ // It's not an AWS error and is probably not transient; bail out.
+ errors <- err
+ return
+ }
+ } else {
+ ticker.Stop()
+ close(success)
+ return
+ }
+ }
+ }
+}
+
+func (l *DynamoDBLock) periodicallyRenewLock(done chan struct{}) {
+ ticker := time.NewTicker(l.renewInterval)
+ for {
+ select {
+ case <-ticker.C:
+ l.writeItem()
+ case <-done:
+ ticker.Stop()
+ return
+ }
+ }
+}
+
+// Attempts to put/update the dynamodb item using condition expressions to
+// evaluate the TTL.
+func (l *DynamoDBLock) writeItem() error {
+ now := time.Now()
+
+ _, err := l.backend.client.UpdateItem(&dynamodb.UpdateItemInput{
+ TableName: aws.String(l.backend.table),
+ Key: map[string]*dynamodb.AttributeValue{
+ "Path": &dynamodb.AttributeValue{S: aws.String(recordPathForVaultKey(l.key))},
+ "Key": &dynamodb.AttributeValue{S: aws.String(recordKeyForVaultKey(l.key))},
+ },
+ UpdateExpression: aws.String("SET #value=:value, #identity=:identity, #expires=:expires"),
+ // If both key and path already exist, we can only write if
+ // A. identity is equal to our identity (or the identity doesn't exist)
+ // or
+ // B. The ttl on the item is <= to the current time
+ ConditionExpression: aws.String(
+ "attribute_not_exists(#path) or " +
+ "attribute_not_exists(#key) or " +
+ // To work when upgrading from older versions that did not include the
+ // Identity attribute, we first check if the attr doesn't exist, and if
+ // it does, then we check if the identity is equal to our own.
+ "(attribute_not_exists(#identity) or #identity = :identity) or " +
+ "#expires <= :now",
+ ),
+ ExpressionAttributeNames: map[string]*string{
+ "#path": aws.String("Path"),
+ "#key": aws.String("Key"),
+ "#identity": aws.String("Identity"),
+ "#expires": aws.String("Expires"),
+ "#value": aws.String("Value"),
+ },
+ ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+ ":identity": &dynamodb.AttributeValue{B: []byte(l.identity)},
+ ":value": &dynamodb.AttributeValue{B: []byte(l.value)},
+ ":now": &dynamodb.AttributeValue{N: aws.String(strconv.FormatInt(now.UnixNano(), 10))},
+ ":expires": &dynamodb.AttributeValue{N: aws.String(strconv.FormatInt(now.Add(l.ttl).UnixNano(), 10))},
+ },
+ })
+ return err
+}
+
+// watch checks whether the lock has changed in the
+// DynamoDB table and closes the leader channel if so.
+// The interval is set by `DynamoDBWatchRetryInterval`.
+// If an error occurs during the check, watch will retry
+// the operation for `DynamoDBWatchRetryMax` times and
+// close the leader channel if it can't succeed.
+func (l *DynamoDBLock) watch(lost chan struct{}) {
+ retries := DynamoDBWatchRetryMax
+
+ ticker := time.NewTicker(l.watchRetryInterval)
+WatchLoop:
+ for {
+ select {
+ case <-ticker.C:
+ resp, err := l.backend.client.GetItem(&dynamodb.GetItemInput{
+ TableName: aws.String(l.backend.table),
+ ConsistentRead: aws.Bool(true),
+ Key: map[string]*dynamodb.AttributeValue{
+ "Path": {S: aws.String(recordPathForVaultKey(l.key))},
+ "Key": {S: aws.String(recordKeyForVaultKey(l.key))},
+ },
+ })
+ if err != nil {
+ retries--
+ if retries == 0 {
+ break WatchLoop
+ }
+ continue
+ }
+
+ if resp == nil {
+ break WatchLoop
+ }
+ record := &DynamoDBLockRecord{}
+ err = dynamodbattribute.UnmarshalMap(resp.Item, record)
+ if err != nil || string(record.Identity) != l.identity {
+ break WatchLoop
+ }
+ }
+ }
+
+ close(lost)
+}
+
+// ensureTableExists creates a DynamoDB table using the given
+// DynamoDB client. If the table already exists, it is not
+// reconfigured.
+func ensureTableExists(client *dynamodb.DynamoDB, table string, readCapacity, writeCapacity int) error {
+ _, err := client.DescribeTable(&dynamodb.DescribeTableInput{
+ TableName: aws.String(table),
+ })
+ if awserr, ok := err.(awserr.Error); ok {
+ if awserr.Code() == "ResourceNotFoundException" {
+ _, err = client.CreateTable(&dynamodb.CreateTableInput{
+ TableName: aws.String(table),
+ ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
+ ReadCapacityUnits: aws.Int64(int64(readCapacity)),
+ WriteCapacityUnits: aws.Int64(int64(writeCapacity)),
+ },
+ KeySchema: []*dynamodb.KeySchemaElement{{
+ AttributeName: aws.String("Path"),
+ KeyType: aws.String("HASH"),
+ }, {
+ AttributeName: aws.String("Key"),
+ KeyType: aws.String("RANGE"),
+ }},
+ AttributeDefinitions: []*dynamodb.AttributeDefinition{{
+ AttributeName: aws.String("Path"),
+ AttributeType: aws.String("S"),
+ }, {
+ AttributeName: aws.String("Key"),
+ AttributeType: aws.String("S"),
+ }},
+ })
+ if err != nil {
+ return err
+ }
+
+ err = client.WaitUntilTableExists(&dynamodb.DescribeTableInput{
+ TableName: aws.String(table),
+ })
+ if err != nil {
+ return err
+ }
+ }
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// recordPathForVaultKey transforms a vault key into
+// a value suitable for the `DynamoDBRecord`'s `Path`
+// property. This path equals the vault key without
+// its last component.
+func recordPathForVaultKey(key string) string {
+ if strings.Contains(key, "/") {
+ return filepath.Dir(key)
+ }
+ return DynamoDBEmptyPath
+}
+
+// recordKeyForVaultKey transforms a vault key into
+// a value suitable for the `DynamoDBRecord`'s `Key`
+// property. This value equals the vault key's
+// last component.
+func recordKeyForVaultKey(key string) string {
+ return filepath.Base(key)
+}
+
+// vaultKey returns the vault key for a given record
+// from the DynamoDB table. This is the combination of
+// the records Path and Key.
+func vaultKey(record *DynamoDBRecord) string {
+ path := unescapeEmptyPath(record.Path)
+ if path == "" {
+ return record.Key
+ }
+ return filepath.Join(path, record.Key)
+}
+
+// escapeEmptyPath is used to escape the root key's path
+// with a value that can be stored in DynamoDB. DynamoDB
+// does not allow values to be empty strings.
+func escapeEmptyPath(s string) string {
+ if s == "" {
+ return DynamoDBEmptyPath
+ }
+ return s
+}
+
+// unescapeEmptyPath is the opposite of `escapeEmptyPath`.
+func unescapeEmptyPath(s string) string {
+ if s == DynamoDBEmptyPath {
+ return ""
+ }
+ return s
+}
+
+// prefixes returns all parent 'folders' for a given
+// vault key.
+// e.g. for 'foo/bar/baz', it returns ['foo', 'foo/bar']
+func prefixes(s string) []string {
+ components := strings.Split(s, "/")
+ result := []string{}
+ for i := 1; i < len(components); i++ {
+ result = append(result, strings.Join(components[:i], "/"))
+ }
+ return result
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/dynamodb_test.go b/vendor/github.com/hashicorp/vault/physical/dynamodb_test.go
new file mode 100644
index 0000000..daac8c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/dynamodb_test.go
@@ -0,0 +1,265 @@
+package physical
+
+import (
+ "fmt"
+ "math/rand"
+ "net/http"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+ dockertest "gopkg.in/ory-am/dockertest.v3"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+)
+
+func TestDynamoDBBackend(t *testing.T) {
+ cleanup, endpoint, credsProvider := prepareDynamoDBTestContainer(t)
+ defer cleanup()
+
+ creds, err := credsProvider.Get()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ region := os.Getenv("AWS_DEFAULT_REGION")
+ if region == "" {
+ region = "us-east-1"
+ }
+
+ conn := dynamodb.New(session.New(&aws.Config{
+ Credentials: credsProvider,
+ Endpoint: aws.String(endpoint),
+ Region: aws.String(region),
+ }))
+
+ var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+ table := fmt.Sprintf("vault-dynamodb-testacc-%d", randInt)
+
+ defer func() {
+ conn.DeleteTable(&dynamodb.DeleteTableInput{
+ TableName: aws.String(table),
+ })
+ }()
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("dynamodb", logger, map[string]string{
+ "access_key": creds.AccessKeyID,
+ "secret_key": creds.SecretAccessKey,
+ "session_token": creds.SessionToken,
+ "table": table,
+ "region": region,
+ "endpoint": endpoint,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ testBackend(t, b)
+ testBackend_ListPrefix(t, b)
+}
+
+func TestDynamoDBHABackend(t *testing.T) {
+ cleanup, endpoint, credsProvider := prepareDynamoDBTestContainer(t)
+ defer cleanup()
+
+ creds, err := credsProvider.Get()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ region := os.Getenv("AWS_DEFAULT_REGION")
+ if region == "" {
+ region = "us-east-1"
+ }
+
+ conn := dynamodb.New(session.New(&aws.Config{
+ Credentials: credsProvider,
+ Endpoint: aws.String(endpoint),
+ Region: aws.String(region),
+ }))
+
+ var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+ table := fmt.Sprintf("vault-dynamodb-testacc-%d", randInt)
+
+ defer func() {
+ conn.DeleteTable(&dynamodb.DeleteTableInput{
+ TableName: aws.String(table),
+ })
+ }()
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ b, err := NewBackend("dynamodb", logger, map[string]string{
+ "access_key": creds.AccessKeyID,
+ "secret_key": creds.SecretAccessKey,
+ "session_token": creds.SessionToken,
+ "table": table,
+ "region": region,
+ "endpoint": endpoint,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ ha, ok := b.(HABackend)
+ if !ok {
+ t.Fatalf("dynamodb does not implement HABackend")
+ }
+ testHABackend(t, ha, ha)
+ testDynamoDBLockTTL(t, ha)
+}
+
+// Similar to testHABackend, but using internal implementation details to
+// trigger the lock failure scenario by setting the lock renew period for one
+// of the locks to a higher value than the lock TTL.
+func testDynamoDBLockTTL(t *testing.T, ha HABackend) {
+ // Set much smaller lock times to speed up the test.
+ lockTTL := time.Second * 3
+ renewInterval := time.Second * 1
+ watchInterval := time.Second * 1
+
+ // Get the lock
+ origLock, err := ha.LockWith("dynamodbttl", "bar")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ // Set the first lock's renew interval to double the expected TTL.
+ lock := origLock.(*DynamoDBLock)
+ lock.renewInterval = lockTTL * 2
+ lock.ttl = lockTTL
+ lock.watchRetryInterval = watchInterval
+
+ // Attempt to lock
+ leaderCh, err := lock.Lock(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh == nil {
+ t.Fatalf("failed to get leader ch")
+ }
+
+ // Check the value
+ held, val, err := lock.Value()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !held {
+ t.Fatalf("should be held")
+ }
+ if val != "bar" {
+ t.Fatalf("bad value: %v", err)
+ }
+
+ // Second acquisition should succeed because the first lock should
+ // not renew within the 3 sec TTL.
+ origLock2, err := ha.LockWith("dynamodbttl", "baz")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ lock2 := origLock2.(*DynamoDBLock)
+ lock2.renewInterval = renewInterval
+ lock2.ttl = lockTTL
+ lock2.watchRetryInterval = watchInterval
+
+ // Cancel attempt in 6 sec so as not to block unit tests forever
+ stopCh := make(chan struct{})
+ time.AfterFunc(lockTTL*2, func() {
+ close(stopCh)
+ })
+
+ // Attempt to lock should work
+ leaderCh2, err := lock2.Lock(stopCh)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh2 == nil {
+ t.Fatalf("should get leader ch")
+ }
+
+ // Check the value
+ held, val, err = lock2.Value()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !held {
+ t.Fatalf("should be held")
+ }
+ if val != "baz" {
+ t.Fatalf("bad value: %v", err)
+ }
+
+ // The first lock should have lost the leader channel
+ leaderChClosed := false
+ blocking := make(chan struct{})
+ // Attempt to read from the leader or the blocking channel, whichever
+ // happens first.
+ go func() {
+ select {
+ case <-time.After(watchInterval * 3):
+ return
+ case <-leaderCh:
+ leaderChClosed = true
+ close(blocking)
+ case <-blocking:
+ return
+ }
+ }()
+
+ <-blocking
+ if !leaderChClosed {
+ t.Fatalf("original lock did not have its leader channel closed.")
+ }
+
+ // Cleanup
+ lock2.Unlock()
+}
+
+func prepareDynamoDBTestContainer(t *testing.T) (cleanup func(), retAddress string, creds *credentials.Credentials) {
+ // If environment variable is set, assume caller wants to target a real
+ // DynamoDB.
+ if os.Getenv("AWS_DYNAMODB_ENDPOINT") != "" {
+ return func() {}, os.Getenv("AWS_DYNAMODB_ENDPOINT"), credentials.NewEnvCredentials()
+ }
+
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ t.Fatalf("Failed to connect to docker: %s", err)
+ }
+
+ resource, err := pool.Run("deangiberson/aws-dynamodb-local", "latest", []string{})
+ if err != nil {
+ t.Fatalf("Could not start local DynamoDB: %s", err)
+ }
+
+ retAddress = "http://localhost:" + resource.GetPort("8000/tcp")
+ cleanup = func() {
+ err := pool.Purge(resource)
+ if err != nil {
+ t.Fatalf("Failed to cleanup local DynamoDB: %s", err)
+ }
+ }
+
+ // Exponential backoff-retry, because DynamoDB may not be able to accept
+ // connections yet.
+ if err := pool.Retry(func() error {
+ var err error
+ resp, err := http.Get(retAddress)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != 400 {
+ return fmt.Errorf("Expected DynamoDB to return status code 400, got (%s) instead.", resp.Status)
+ }
+ return nil
+ }); err != nil {
+ t.Fatalf("Could not connect to docker: %s", err)
+ }
+ return cleanup, retAddress, credentials.NewStaticCredentials("fake", "fake", "")
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd.go b/vendor/github.com/hashicorp/vault/physical/etcd.go
new file mode 100644
index 0000000..01a928d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/etcd.go
@@ -0,0 +1,145 @@
+package physical
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/url"
+ "os"
+ "strings"
+
+ "github.com/coreos/etcd/client"
+ "github.com/coreos/go-semver/semver"
+ log "github.com/mgutz/logxi/v1"
+)
+
+var (
+ EtcdSyncConfigError = errors.New("client setup failed: unable to parse etcd sync field in config")
+ EtcdSyncClusterError = errors.New("client setup failed: unable to sync etcd cluster")
+ EtcdMultipleBootstrapError = errors.New("client setup failed: multiple discovery or bootstrap flags specified, use either \"address\" or \"discovery_srv\"")
+ EtcdAddressError = errors.New("client setup failed: address must be valid URL (ex. 'scheme://host:port')")
+ EtcdSemaphoreKeysEmptyError = errors.New("lock queue is empty")
+ EtcdLockHeldError = errors.New("lock already held")
+ EtcdLockNotHeldError = errors.New("lock not held")
+ EtcdSemaphoreKeyRemovedError = errors.New("semaphore key removed before lock acquisition")
+ EtcdVersionUnknow = errors.New("etcd: unknown API version")
+)
+
+// newEtcdBackend constructs an etcd backend using a given machine address.
+func newEtcdBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+ var (
+ apiVersion string
+ ok bool
+ )
+
+ // The v2 client can talk to both etcd2 and etcd3 through API v2.
+ c, err := newEtcdV2Client(conf)
+ if err != nil {
+ return nil, errors.New("failed to create etcd client: " + err.Error())
+ }
+
+ remoteAPIVersion, err := getEtcdAPIVersion(c)
+ if err != nil {
+ return nil, errors.New("failed to get etcd API version: " + err.Error())
+ }
+
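+ // The API version is resolved in order of precedence: the "etcd_api"
+ // config key, then the ETCD_API environment variable, then auto-detection
+ // from the cluster version and any existing v2 data.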
+ if apiVersion, ok = conf["etcd_api"]; !ok {
+ apiVersion = os.Getenv("ETCD_API")
+ }
+
+ if apiVersion == "" {
+ path, ok := conf["path"]
+ if !ok {
+ path = "/vault"
+ }
+ kAPI := client.NewKeysAPI(c)
+
+ // Keep using v2 if Vault data exists in v2 and the user does not
+ // explicitly ask for v3.
+ _, err := kAPI.Get(context.Background(), path, &client.GetOptions{})
+ if errorIsMissingKey(err) {
+ apiVersion = remoteAPIVersion
+ } else if err == nil {
+ apiVersion = "2"
+ } else {
+ return nil, errors.New("failed to check etcd status: " + err.Error())
+ }
+ }
+
+ switch apiVersion {
+ case "2", "etcd2", "v2":
+ return newEtcd2Backend(conf, logger)
+ case "3", "etcd3", "v3":
+ if remoteAPIVersion == "2" {
+ return nil, errors.New("etcd3 is required: etcd2 is running")
+ }
+ return newEtcd3Backend(conf, logger)
+ default:
+ return nil, EtcdVersionUnknow
+ }
+}
+
+// getEtcdAPIVersion gets the latest supported API version.
+// If etcd cluster version >= 3.1, "3" will be returned.
+// Otherwise, "2" will be returned.
+func getEtcdAPIVersion(c client.Client) (string, error) {
+ v, err := c.GetVersion(context.Background())
+ if err != nil {
+ return "", err
+ }
+
+ sv, err := semver.NewVersion(v.Cluster)
+ if err != nil {
+ return "", nil
+ }
+
+ if sv.LessThan(*semver.Must(semver.NewVersion("3.1.0"))) {
+ return "2", nil
+ }
+
+ return "3", nil
+}
+
+// getEtcdOption retrieves a config option, in order of priority:
+// 1. The named environment variable, if it exists
+// 2. The key in the config map
+func getEtcdOption(conf map[string]string, confKey, envVar string) (string, bool) {
+ confVal, inConf := conf[confKey]
+ envVal, inEnv := os.LookupEnv(envVar)
+ if inEnv {
+ return envVal, true
+ }
+ return confVal, inConf
+}
+
+func getEtcdEndpoints(conf map[string]string) ([]string, error) {
+ address, staticBootstrap := getEtcdOption(conf, "address", "ETCD_ADDR")
+ domain, useSrv := getEtcdOption(conf, "discovery_srv", "ETCD_DISCOVERY_SRV")
+ if useSrv && staticBootstrap {
+ return nil, EtcdMultipleBootstrapError
+ }
+
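+ // A static "address" may name several machines separated by commas, e.g.
+ // "https://etcd0:2379,https://etcd1:2379"; each must parse as a URL with
+ // a scheme.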
+ if staticBootstrap {
+ endpoints := strings.Split(address, Etcd2MachineDelimiter)
+ // Verify that the machines are valid URLs
+ for _, e := range endpoints {
+ u, urlErr := url.Parse(e)
+ if urlErr != nil || u.Scheme == "" {
+ return nil, EtcdAddressError
+ }
+ }
+ return endpoints, nil
+ }
+
+ if useSrv {
+ discoverer := client.NewSRVDiscover()
+ endpoints, err := discoverer.Discover(domain)
+ if err != nil {
+ return nil, fmt.Errorf("failed to discover etcd endpoints through SRV discovery: %v", err)
+ }
+ return endpoints, nil
+ }
+
+ // Set a default endpoints list if no option was set
+ return []string{"http://127.0.0.1:2379"}, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd2.go b/vendor/github.com/hashicorp/vault/physical/etcd2.go
new file mode 100644
index 0000000..4ef4b08
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/etcd2.go
@@ -0,0 +1,596 @@
+package physical
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ metrics "github.com/armon/go-metrics"
+ "github.com/coreos/etcd/client"
+ "github.com/coreos/etcd/pkg/transport"
+ log "github.com/mgutz/logxi/v1"
+)
+
+const (
+ // Ideally, this prefix would match the "_" used in the file backend, but
+ // that prefix has special meaning in etcd. Specifically, it excludes those
+ // entries from directory listings.
+ Etcd2NodeFilePrefix = "."
+
+ // The lock prefix can (and probably should) cause an entry to be excluded
+ // from directory listings, so "_" works here.
+ Etcd2NodeLockPrefix = "_"
+
+ // The delimiter is the same as the `-C` flag of etcdctl.
+ Etcd2MachineDelimiter = ","
+
+ // The lock TTL matches the default that Consul API uses, 15 seconds.
+ Etcd2LockTTL = 15 * time.Second
+
+ // The amount of time to wait between the semaphore key renewals
+ Etcd2LockRenewInterval = 5 * time.Second
+
+ // The amount of time to wait if a watch fails before trying again.
+ Etcd2WatchRetryInterval = time.Second
+
+ // The number of times to re-try a failed watch before signaling that leadership is lost.
+ Etcd2WatchRetryMax = 5
+)
+
+// Etcd2Backend is a physical backend that stores data at a specific
+// prefix within etcd. It is used for most production situations as
+// it allows Vault to run on multiple machines in a highly-available manner.
+type Etcd2Backend struct {
+ path string
+ kAPI client.KeysAPI
+ permitPool *PermitPool
+ logger log.Logger
+ haEnabled bool
+}
+
+func newEtcd2Backend(conf map[string]string, logger log.Logger) (Backend, error) {
+ // Get the etcd path from the configuration.
+ path, ok := conf["path"]
+ if !ok {
+ path = "/vault"
+ }
+
+ // Ensure path is prefixed.
+ if !strings.HasPrefix(path, "/") {
+ path = "/" + path
+ }
+
+ c, err := newEtcdV2Client(conf)
+ if err != nil {
+ return nil, err
+ }
+
+ haEnabled := os.Getenv("ETCD_HA_ENABLED")
+ if haEnabled == "" {
+ haEnabled = conf["ha_enabled"]
+ }
+ if haEnabled == "" {
+ haEnabled = "false"
+ }
+ haEnabledBool, err := strconv.ParseBool(haEnabled)
+ if err != nil {
+ return nil, fmt.Errorf("value [%v] of 'ha_enabled' could not be understood", haEnabled)
+ }
+
+ // Should we sync the cluster state? There are three available options
+ // for our client library: don't sync (required for some proxies), sync
+ // once, or sync periodically with AutoSync. We currently support the
+ // first two.
+ sync, ok := conf["sync"]
+ if !ok {
+ sync = "yes"
+ }
+ switch sync {
+ case "yes", "true", "y", "1":
+ ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+ syncErr := c.Sync(ctx)
+ cancel()
+ if syncErr != nil {
+ return nil, fmt.Errorf("%s: %s", EtcdSyncClusterError, syncErr)
+ }
+ case "no", "false", "n", "0":
+ default:
+ return nil, fmt.Errorf("value of 'sync' could not be understood")
+ }
+
+ kAPI := client.NewKeysAPI(c)
+
+ // Setup the backend.
+ return &Etcd2Backend{
+ path: path,
+ kAPI: kAPI,
+ permitPool: NewPermitPool(DefaultParallelOperations),
+ logger: logger,
+ haEnabled: haEnabledBool,
+ }, nil
+}
+
+func newEtcdV2Client(conf map[string]string) (client.Client, error) {
+ endpoints, err := getEtcdEndpoints(conf)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create a new client from the supplied address and attempt to sync with the
+ // cluster.
+ var cTransport client.CancelableTransport
+ cert, hasCert := conf["tls_cert_file"]
+ key, hasKey := conf["tls_key_file"]
+ ca, hasCa := conf["tls_ca_file"]
+ if (hasCert && hasKey) || hasCa {
+ var transportErr error
+ tls := transport.TLSInfo{
+ CAFile: ca,
+ CertFile: cert,
+ KeyFile: key,
+ }
+ cTransport, transportErr = transport.NewTransport(tls, 30*time.Second)
+
+ if transportErr != nil {
+ return nil, transportErr
+ }
+ } else {
+ cTransport = client.DefaultTransport
+ }
+
+ cfg := client.Config{
+ Endpoints: endpoints,
+ Transport: cTransport,
+ }
+
+ // Set credentials.
+ username := os.Getenv("ETCD_USERNAME")
+ if username == "" {
+ username = conf["username"]
+ }
+
+ password := os.Getenv("ETCD_PASSWORD")
+ if password == "" {
+ password = conf["password"]
+ }
+
+ if username != "" && password != "" {
+ cfg.Username = username
+ cfg.Password = password
+ }
+
+ return client.New(cfg)
+}
+
+// Put is used to insert or update an entry.
+func (c *Etcd2Backend) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"etcd", "put"}, time.Now())
+ value := base64.StdEncoding.EncodeToString(entry.Value)
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ _, err := c.kAPI.Set(context.Background(), c.nodePath(entry.Key), value, nil)
+ return err
+}
+
+// Get is used to fetch an entry.
+func (c *Etcd2Backend) Get(key string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"etcd", "get"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ getOpts := &client.GetOptions{
+ Recursive: false,
+ Sort: false,
+ }
+ response, err := c.kAPI.Get(context.Background(), c.nodePath(key), getOpts)
+ if err != nil {
+ if errorIsMissingKey(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ // Decode the stored value from base-64.
+ value, err := base64.StdEncoding.DecodeString(response.Node.Value)
+ if err != nil {
+ return nil, err
+ }
+
+ // Construct and return a new entry.
+ return &Entry{
+ Key: key,
+ Value: value,
+ }, nil
+}
+
+// Delete is used to permanently delete an entry.
+func (c *Etcd2Backend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"etcd", "delete"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ // Remove the key, non-recursively.
+ delOpts := &client.DeleteOptions{
+ Recursive: false,
+ }
+ _, err := c.kAPI.Delete(context.Background(), c.nodePath(key), delOpts)
+ if err != nil && !errorIsMissingKey(err) {
+ return err
+ }
+ return nil
+}
+
+// List is used to list all the keys under a given prefix, up to the next
+// prefix.
+func (c *Etcd2Backend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"etcd", "list"}, time.Now())
+
+ // Set a directory path from the given prefix.
+ path := c.nodePathDir(prefix)
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ // Get the directory, non-recursively, from etcd. If the directory is
+ // missing, we just return an empty list of contents.
+ getOpts := &client.GetOptions{
+ Recursive: false,
+ Sort: true,
+ }
+ response, err := c.kAPI.Get(context.Background(), path, getOpts)
+ if err != nil {
+ if errorIsMissingKey(err) {
+ return []string{}, nil
+ }
+ return nil, err
+ }
+
+ out := make([]string, len(response.Node.Nodes))
+ for i, node := range response.Node.Nodes {
+
+ // etcd keys include the full path, so let's trim the prefix directory
+ // path.
+ name := strings.TrimPrefix(node.Key, path)
+
+ // Check if this node is itself a directory. If it is, add a trailing
+ // slash; if it isn't, remove the node file prefix.
+ if node.Dir {
+ out[i] = name + "/"
+ } else {
+ out[i] = name[1:]
+ }
+ }
+ return out, nil
+}
+
+// nodePath returns an etcd filepath based on the given key.
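+// For example, with the backend path "/vault", the key "foo/bar" maps to
+// the etcd node "/vault/foo/.bar".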
+func (b *Etcd2Backend) nodePath(key string) string {
+ return filepath.Join(b.path, filepath.Dir(key), Etcd2NodeFilePrefix+filepath.Base(key))
+}
+
+// nodePathDir returns an etcd directory path based on the given key.
+func (b *Etcd2Backend) nodePathDir(key string) string {
+ return filepath.Join(b.path, key) + "/"
+}
+
+// nodePathLock returns an etcd directory path used specifically for semaphore
+// indices based on the given key.
+func (b *Etcd2Backend) nodePathLock(key string) string {
+ return filepath.Join(b.path, filepath.Dir(key), Etcd2NodeLockPrefix+filepath.Base(key)+"/")
+}
+
+// LockWith is used for mutual exclusion based on the given key.
+func (c *Etcd2Backend) LockWith(key, value string) (Lock, error) {
+ return &Etcd2Lock{
+ kAPI: c.kAPI,
+ value: value,
+ semaphoreDirKey: c.nodePathLock(key),
+ }, nil
+}
+
+// HAEnabled indicates whether the HA functionality should be exposed, based
+// on the "ha_enabled" configuration.
+func (e *Etcd2Backend) HAEnabled() bool {
+ return e.haEnabled
+}
+
+// Etcd2Lock implements a lock using an Etcd2 backend.
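+// The lock is a fair queue: each contender enqueues an in-order TTL'd key
+// under the semaphore directory, and whichever contender owns the
+// lowest-indexed key holds the lock until that key is deleted or expires.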
+type Etcd2Lock struct {
+ kAPI client.KeysAPI
+ value, semaphoreDirKey, semaphoreKey string
+ lock sync.Mutex
+}
+
+// addSemaphoreKey acquires a new ordered semaphore key.
+func (c *Etcd2Lock) addSemaphoreKey() (string, uint64, error) {
+ // CreateInOrder is an atomic operation that can be used to enqueue a
+ // request onto a semaphore. In the rest of the comments, we refer to the
+ // resulting key as a "semaphore key".
+ // https://coreos.com/etcd/docs/2.0.8/api.html#atomically-creating-in-order-keys
+ opts := &client.CreateInOrderOptions{
+ TTL: Etcd2LockTTL,
+ }
+ response, err := c.kAPI.CreateInOrder(context.Background(), c.semaphoreDirKey, c.value, opts)
+ if err != nil {
+ return "", 0, err
+ }
+ return response.Node.Key, response.Index, nil
+}
+
+// renewSemaphoreKey renews an existing semaphore key.
+func (c *Etcd2Lock) renewSemaphoreKey() (string, uint64, error) {
+ setOpts := &client.SetOptions{
+ TTL: Etcd2LockTTL,
+ PrevExist: client.PrevExist,
+ }
+ response, err := c.kAPI.Set(context.Background(), c.semaphoreKey, c.value, setOpts)
+ if err != nil {
+ return "", 0, err
+ }
+ return response.Node.Key, response.Index, nil
+}
+
+// getSemaphoreKey returns the key and value of the current holder of the
+// lock, along with the current etcd index.
+func (c *Etcd2Lock) getSemaphoreKey() (string, string, uint64, error) {
+ // Get the list of waiters in order to see if we are next.
+ getOpts := &client.GetOptions{
+ Recursive: false,
+ Sort: true,
+ }
+ response, err := c.kAPI.Get(context.Background(), c.semaphoreDirKey, getOpts)
+ if err != nil {
+ return "", "", 0, err
+ }
+
+ // Make sure the list isn't empty.
+ if response.Node.Nodes.Len() == 0 {
+ return "", "", response.Index, nil
+ }
+ return response.Node.Nodes[0].Key, response.Node.Nodes[0].Value, response.Index, nil
+}
+
+// isHeld determines if we are the current holders of the lock.
+func (c *Etcd2Lock) isHeld() (bool, error) {
+ if c.semaphoreKey == "" {
+ return false, nil
+ }
+
+ // Get the key of the current holder of the lock.
+ currentSemaphoreKey, _, _, err := c.getSemaphoreKey()
+ if err != nil {
+ return false, err
+ }
+ return c.semaphoreKey == currentSemaphoreKey, nil
+}
+
+// assertHeld determines whether or not we are the current holders of the lock
+// and returns an EtcdLockNotHeldError if we are not.
+func (c *Etcd2Lock) assertHeld() error {
+ held, err := c.isHeld()
+ if err != nil {
+ return err
+ }
+
+ // Check if we don't hold the lock.
+ if !held {
+ return EtcdLockNotHeldError
+ }
+ return nil
+}
+
+// assertNotHeld determines whether or not we are the current holders of the
+// lock and returns an Etcd2LockHeldError if we are.
+func (c *Etcd2Lock) assertNotHeld() error {
+ held, err := c.isHeld()
+ if err != nil {
+ return err
+ }
+
+ // Check if we hold the lock.
+ if held {
+ return EtcdLockHeldError
+ }
+ return nil
+}
+
+// periodicallyRenewSemaphoreKey renews our semaphore key on an interval so
+// that it does not expire.
+func (c *Etcd2Lock) periodicallyRenewSemaphoreKey(stopCh chan struct{}) {
+ for {
+ select {
+ case <-time.After(Etcd2LockRenewInterval):
+ c.renewSemaphoreKey()
+ case <-stopCh:
+ return
+ }
+ }
+}
+
+// watchForKeyRemoval continuously watches a single non-directory key starting
+// from the provided etcd index and closes the provided channel when it's
+// deleted, expires, or appears to be missing.
+func (c *Etcd2Lock) watchForKeyRemoval(key string, etcdIndex uint64, closeCh chan struct{}) {
+ retries := Etcd2WatchRetryMax
+
+ for {
+ // Start a non-recursive watch of the given key.
+ w := c.kAPI.Watcher(key, &client.WatcherOptions{AfterIndex: etcdIndex, Recursive: false})
+ response, err := w.Next(context.TODO())
+ if err != nil {
+
+ // If the key is just missing, we can exit the loop.
+ if errorIsMissingKey(err) {
+ break
+ }
+
+ // If the error is something else, there's nothing we can do but retry
+ // the watch. Check that we still have retries left.
+ retries--
+ if retries == 0 {
+ break
+ }
+
+ // Sleep for a period of time to avoid slamming etcd.
+ time.Sleep(Etcd2WatchRetryInterval)
+ continue
+ }
+
+ // Check if the key we are concerned with has been removed. If it has, we
+ // can exit the loop.
+ if response.Node.Key == key &&
+ (response.Action == "delete" || response.Action == "expire") {
+ break
+ }
+
+ // Update the etcd index.
+ etcdIndex = response.Index + 1
+ }
+
+ // Regardless of what happened, we need to close the close channel.
+ close(closeCh)
+}
+
+// Lock attempts to acquire the lock by waiting for a new semaphore key in etcd
+// to become the first in the queue and will block until it is successful or
+// it receives a signal on the provided channel. The returned channel will be
+// closed when the lock is lost, either by an explicit call to Unlock or by
+// the associated semaphore key in etcd otherwise being deleted or expiring.
+//
+// If the lock is currently held by this instance of Etcd2Lock, Lock will
+// return an EtcdLockHeldError error.
+func (c *Etcd2Lock) Lock(stopCh <-chan struct{}) (doneCh <-chan struct{}, retErr error) {
+ // Get the local lock before interacting with etcd.
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if the lock is already held.
+ if err := c.assertNotHeld(); err != nil {
+ return nil, err
+ }
+
+ // Add a new semaphore key that we will track.
+ semaphoreKey, _, err := c.addSemaphoreKey()
+ if err != nil {
+ return nil, err
+ }
+ c.semaphoreKey = semaphoreKey
+
+ // Get the current semaphore key.
+ currentSemaphoreKey, _, currentEtcdIndex, err := c.getSemaphoreKey()
+ if err != nil {
+ return nil, err
+ }
+
+ // Create an etcd-compatible boolean stop channel from the provided
+ // interface stop channel.
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ <-stopCh
+ cancel()
+ }()
+ defer cancel()
+
+ // Create a channel to signal when we lose the semaphore key.
+ done := make(chan struct{})
+ defer func() {
+ if retErr != nil {
+ close(done)
+ }
+ }()
+
+ go c.periodicallyRenewSemaphoreKey(done)
+
+ // Loop until the current semaphore key matches ours.
+ for semaphoreKey != currentSemaphoreKey {
+ var err error
+
+ // Start a watch of the entire lock directory
+ w := c.kAPI.Watcher(c.semaphoreDirKey, &client.WatcherOptions{AfterIndex: currentEtcdIndex, Recursive: true})
+ response, err := w.Next(ctx)
+ if err != nil {
+
+ // If the error is not an etcd error, we can assume it's a notification
+ // of the stop channel having closed. In this scenario, we also want to
+ // remove our semaphore key as we are no longer waiting to acquire the
+ // lock.
+ if _, ok := err.(*client.Error); !ok {
+ delOpts := &client.DeleteOptions{
+ Recursive: false,
+ }
+ _, err = c.kAPI.Delete(context.Background(), c.semaphoreKey, delOpts)
+ }
+ return nil, err
+ }
+
+ // Make sure the semaphore key we are waiting on has not been removed. If
+ // it has, this is an error and nothing else needs to be done.
+ if response.Node.Key == semaphoreKey &&
+ (response.Action == "delete" || response.Action == "expire") {
+ return nil, EtcdSemaphoreKeyRemovedError
+ }
+
+ // Get the current semaphore key and etcd index.
+ currentSemaphoreKey, _, currentEtcdIndex, err = c.getSemaphoreKey()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ go c.watchForKeyRemoval(c.semaphoreKey, currentEtcdIndex, done)
+ return done, nil
+}
+
+// Unlock releases the lock by deleting the associated semaphore key in etcd.
+//
+// If the lock is not currently held by this instance of Etcd2Lock, Unlock will
+// return an EtcdLockNotHeldError error.
+func (c *Etcd2Lock) Unlock() error {
+ // Get the local lock before interacting with etcd.
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check that the lock is held.
+ if err := c.assertHeld(); err != nil {
+ return err
+ }
+
+ // Delete our semaphore key.
+ delOpts := &client.DeleteOptions{
+ Recursive: false,
+ }
+ if _, err := c.kAPI.Delete(context.Background(), c.semaphoreKey, delOpts); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Value checks whether or not the lock is held by any instance of Etcd2Lock,
+// including this one, and returns the current value.
+func (c *Etcd2Lock) Value() (bool, string, error) {
+ semaphoreKey, semaphoreValue, _, err := c.getSemaphoreKey()
+ if err != nil {
+ return false, "", err
+ }
+
+ if semaphoreKey == "" {
+ return false, "", nil
+ }
+ return true, semaphoreValue, nil
+}
+
+// errorIsMissingKey returns true if the given error is an etcd error with an
+// error code corresponding to a missing key.
+func errorIsMissingKey(err error) bool {
+ etcdErr, ok := err.(client.Error)
+ return ok && etcdErr.Code == client.ErrorCodeKeyNotFound
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd3.go b/vendor/github.com/hashicorp/vault/physical/etcd3.go
new file mode 100644
index 0000000..6fecc73
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/etcd3.go
@@ -0,0 +1,300 @@
+package physical
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ metrics "github.com/armon/go-metrics"
+ "github.com/coreos/etcd/client"
+ "github.com/coreos/etcd/clientv3"
+ "github.com/coreos/etcd/clientv3/concurrency"
+ "github.com/coreos/etcd/pkg/transport"
+ log "github.com/mgutz/logxi/v1"
+ "golang.org/x/net/context"
+)
+
+// EtcdBackend is a physical backend that stores data at a specific
+// prefix within etcd. It is used for most production situations as
+// it allows Vault to run on multiple machines in a highly-available manner.
+type EtcdBackend struct {
+ logger log.Logger
+ path string
+ haEnabled bool
+
+ permitPool *PermitPool
+
+ etcd *clientv3.Client
+}
+
+// The etcd default lease duration is 60s; set to 15s for faster recovery.
+const etcd3LockTimeoutInSeconds = 15
+
+// newEtcd3Backend constructs an etcd3 backend.
+func newEtcd3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
+ // Get the etcd path from the configuration.
+ path, ok := conf["path"]
+ if !ok {
+ path = "/vault"
+ }
+
+ // Ensure path is prefixed.
+ if !strings.HasPrefix(path, "/") {
+ path = "/" + path
+ }
+
+ endpoints, err := getEtcdEndpoints(conf)
+ if err != nil {
+ return nil, err
+ }
+
+ cfg := clientv3.Config{
+ Endpoints: endpoints,
+ }
+
+ haEnabled := os.Getenv("ETCD_HA_ENABLED")
+ if haEnabled == "" {
+ haEnabled = conf["ha_enabled"]
+ }
+ if haEnabled == "" {
+ haEnabled = "false"
+ }
+ haEnabledBool, err := strconv.ParseBool(haEnabled)
+ if err != nil {
+ return nil, fmt.Errorf("value [%v] of 'ha_enabled' could not be understood", haEnabled)
+ }
+
+ cert, hasCert := conf["tls_cert_file"]
+ key, hasKey := conf["tls_key_file"]
+ ca, hasCa := conf["tls_ca_file"]
+ if (hasCert && hasKey) || hasCa {
+ tls := transport.TLSInfo{
+ CAFile: ca,
+ CertFile: cert,
+ KeyFile: key,
+ }
+
+ tlscfg, err := tls.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ cfg.TLS = tlscfg
+ }
+
+ // Set credentials.
+ username := os.Getenv("ETCD_USERNAME")
+ if username == "" {
+ username = conf["username"]
+ }
+
+ password := os.Getenv("ETCD_PASSWORD")
+ if password == "" {
+ password = conf["password"]
+ }
+
+ if username != "" && password != "" {
+ cfg.Username = username
+ cfg.Password = password
+ }
+
+ etcd, err := clientv3.New(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ ssync, ok := conf["sync"]
+ if !ok {
+ ssync = "true"
+ }
+ sync, err := strconv.ParseBool(ssync)
+ if err != nil {
+ return nil, fmt.Errorf("value of 'sync' (%v) could not be understood", err)
+ }
+
+ if sync {
+ ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+ err := etcd.Sync(ctx)
+ cancel()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &EtcdBackend{
+ path: path,
+ etcd: etcd,
+ permitPool: NewPermitPool(DefaultParallelOperations),
+ logger: logger,
+ haEnabled: haEnabledBool,
+ }, nil
+}
+
+func (c *EtcdBackend) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"etcd", "put"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ _, err := c.etcd.Put(context.Background(), path.Join(c.path, entry.Key), string(entry.Value))
+ return err
+}
+
+func (c *EtcdBackend) Get(key string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"etcd", "get"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ resp, err := c.etcd.Get(context.Background(), path.Join(c.path, key))
+ if err != nil {
+ return nil, err
+ }
+
+ if len(resp.Kvs) == 0 {
+ return nil, nil
+ }
+ if len(resp.Kvs) > 1 {
+ return nil, errors.New("unexpected number of keys from a get request")
+ }
+ return &Entry{
+ Key: key,
+ Value: resp.Kvs[0].Value,
+ }, nil
+}
+
+func (c *EtcdBackend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"etcd", "delete"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ _, err := c.etcd.Delete(context.Background(), path.Join(c.path, key))
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *EtcdBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"etcd", "list"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ prefix = path.Join(c.path, prefix)
+ resp, err := c.etcd.Get(context.Background(), prefix, clientv3.WithPrefix())
+ if err != nil {
+ return nil, err
+ }
+
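+ // Keep only direct children of the prefix: bare keys are reported as-is,
+ // while deeper keys are collapsed to their first path segment with a
+ // trailing slash.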
+ keys := []string{}
+ for _, kv := range resp.Kvs {
+ key := strings.TrimPrefix(string(kv.Key), prefix)
+ key = strings.TrimPrefix(key, "/")
+
+ if len(key) == 0 {
+ continue
+ }
+
+ if i := strings.Index(key, "/"); i == -1 {
+ keys = append(keys, key)
+ } else {
+ keys = appendIfMissing(keys, key[:i+1])
+ }
+ }
+ return keys, nil
+}
+
+func (e *EtcdBackend) HAEnabled() bool {
+ return e.haEnabled
+}
+
+// EtcdLock implements a lock using an etcd3 backend.
+type EtcdLock struct {
+ lock sync.Mutex
+ held bool
+
+ etcdSession *concurrency.Session
+ etcdMu *concurrency.Mutex
+
+ prefix string
+ value string
+
+ etcd *clientv3.Client
+}
+
+// LockWith is used for mutual exclusion based on the given key.
+func (c *EtcdBackend) LockWith(key, value string) (Lock, error) {
+ session, err := concurrency.NewSession(c.etcd, concurrency.WithTTL(etcd3LockTimeoutInSeconds))
+ if err != nil {
+ return nil, err
+ }
+
+ p := path.Join(c.path, key)
+ return &EtcdLock{
+ etcdSession: session,
+ etcdMu: concurrency.NewMutex(session, p),
+ prefix: p,
+ value: value,
+ etcd: c.etcd,
+ }, nil
+}
+
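+// Lock blocks until the underlying etcd mutex is acquired or stopCh is
+// closed. The returned channel is the concurrency session's Done channel,
+// which closes when the session lease expires or is revoked, signaling that
+// leadership has been lost.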
+func (c *EtcdLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if c.held {
+ return nil, EtcdLockHeldError
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ <-stopCh
+ cancel()
+ }()
+ if err := c.etcdMu.Lock(ctx); err != nil {
+ if err == context.Canceled {
+ return nil, nil
+ }
+ return nil, err
+ }
+ if _, err := c.etcd.Put(ctx, c.etcdMu.Key(), c.value, clientv3.WithLease(c.etcdSession.Lease())); err != nil {
+ return nil, err
+ }
+
+ c.held = true
+
+ return c.etcdSession.Done(), nil
+}
+
+func (c *EtcdLock) Unlock() error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if !c.held {
+ return EtcdLockNotHeldError
+ }
+
+ return c.etcdMu.Unlock(context.Background())
+}
+
+func (c *EtcdLock) Value() (bool, string, error) {
+ resp, err := c.etcd.Get(context.Background(),
+ c.prefix, clientv3.WithPrefix(),
+ clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortAscend))
+
+ if err != nil {
+ return false, "", err
+ }
+ if len(resp.Kvs) == 0 {
+ return false, "", nil
+ }
+
+ return true, string(resp.Kvs[0].Value), nil
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd3_test.go b/vendor/github.com/hashicorp/vault/physical/etcd3_test.go
new file mode 100644
index 0000000..0724091
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/etcd3_test.go
@@ -0,0 +1,37 @@
+package physical
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func TestEtcd3Backend(t *testing.T) {
+ addr := os.Getenv("ETCD_ADDR")
+ if addr == "" {
+ t.Skip("no etcd3 server found")
+ }
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("etcd", logger, map[string]string{
+ "path": fmt.Sprintf("/vault-%d", time.Now().Unix()),
+ "etcd_api": "3",
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ testBackend(t, b)
+ testBackend_ListPrefix(t, b)
+
+ ha, ok := b.(HABackend)
+ if !ok {
+ t.Fatalf("etcd3 does not implement HABackend")
+ }
+ testHABackend(t, ha, ha)
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd_test.go b/vendor/github.com/hashicorp/vault/physical/etcd_test.go
new file mode 100644
index 0000000..adddac2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/etcd_test.go
@@ -0,0 +1,70 @@
+package physical
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/coreos/etcd/client"
+ "golang.org/x/net/context"
+)
+
+func TestEtcdBackend(t *testing.T) {
+ addr := os.Getenv("ETCD_ADDR")
+ if addr == "" {
+ t.SkipNow()
+ }
+
+ cfg := client.Config{
+ Endpoints: []string{addr},
+ Transport: client.DefaultTransport,
+ }
+
+ c, err := client.New(cfg)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+ syncErr := c.Sync(ctx)
+ cancel()
+ if syncErr != nil {
+ t.Fatalf("err: %v", EtcdSyncClusterError)
+ }
+
+ kAPI := client.NewKeysAPI(c)
+
+ randPath := fmt.Sprintf("/vault-%d", time.Now().Unix())
+ defer func() {
+ delOpts := &client.DeleteOptions{
+ Recursive: true,
+ }
+ if _, err := kAPI.Delete(context.Background(), randPath, delOpts); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }()
+
+ // Generate new etcd backend. The etcd address is read from ETCD_ADDR. No
+ // need to provide it explicitly.
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("etcd", logger, map[string]string{
+ "path": randPath,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ testBackend(t, b)
+ testBackend_ListPrefix(t, b)
+
+ ha, ok := b.(HABackend)
+ if !ok {
+ t.Fatalf("etcd does not implement HABackend")
+ }
+ testHABackend(t, ha, ha)
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/file.go b/vendor/github.com/hashicorp/vault/physical/file.go
new file mode 100644
index 0000000..d9c5225
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/file.go
@@ -0,0 +1,248 @@
+package physical
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+)
+
+// FileBackend is a physical backend that stores data on disk
+// at a given file path. It can be used for durable single server
+// situations, or to develop locally where durability is not critical.
+//
+// WARNING: the file backend implementation is currently extremely unsafe
+// and non-performant. It is meant mostly for local testing and development.
+// It can be improved in the future.
+type FileBackend struct {
+ sync.RWMutex
+ path string
+ logger log.Logger
+ permitPool *PermitPool
+}
+
+type TransactionalFileBackend struct {
+ FileBackend
+}
+
+// newFileBackend constructs a FileBackend using the given directory
+func newFileBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+ path, ok := conf["path"]
+ if !ok {
+ return nil, fmt.Errorf("'path' must be set")
+ }
+
+ return &FileBackend{
+ path: path,
+ logger: logger,
+ permitPool: NewPermitPool(DefaultParallelOperations),
+ }, nil
+}
+
+func newTransactionalFileBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+ path, ok := conf["path"]
+ if !ok {
+ return nil, fmt.Errorf("'path' must be set")
+ }
+
+ // Create a pool of size 1 so only one operation runs at a time
+ return &TransactionalFileBackend{
+ FileBackend: FileBackend{
+ path: path,
+ logger: logger,
+ permitPool: NewPermitPool(1),
+ },
+ }, nil
+}
+
+func (b *FileBackend) Delete(path string) error {
+ b.permitPool.Acquire()
+ defer b.permitPool.Release()
+
+ b.Lock()
+ defer b.Unlock()
+
+ return b.DeleteInternal(path)
+}
+
+func (b *FileBackend) DeleteInternal(path string) error {
+ if path == "" {
+ return nil
+ }
+
+ basePath, key := b.expandPath(path)
+ fullPath := filepath.Join(basePath, key)
+
+ err := os.Remove(fullPath)
+ if err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("Failed to remove %q: %v", fullPath, err)
+ }
+
+ err = b.cleanupLogicalPath(path)
+
+ return err
+}
+
+// cleanupLogicalPath is used to remove all empty nodes, beginning with the
+// deepest one and aborting on the first non-empty one, up to the top-level
+// node.
+func (b *FileBackend) cleanupLogicalPath(path string) error {
+ nodes := strings.Split(path, fmt.Sprintf("%c", os.PathSeparator))
+ for i := len(nodes) - 1; i > 0; i-- {
+ fullPath := filepath.Join(b.path, filepath.Join(nodes[:i]...))
+
+ dir, err := os.Open(fullPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ list, err := dir.Readdir(1)
+ dir.Close()
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // If we have no entries, it's an empty directory; remove it
+ if err == io.EOF || list == nil || len(list) == 0 {
+ err = os.Remove(fullPath)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (b *FileBackend) Get(k string) (*Entry, error) {
+ b.permitPool.Acquire()
+ defer b.permitPool.Release()
+
+ b.RLock()
+ defer b.RUnlock()
+
+ return b.GetInternal(k)
+}
+
+func (b *FileBackend) GetInternal(k string) (*Entry, error) {
+ path, key := b.expandPath(k)
+ path = filepath.Join(path, key)
+
+ f, err := os.Open(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+
+ return nil, err
+ }
+ defer f.Close()
+
+ var entry Entry
+ if err := jsonutil.DecodeJSONFromReader(f, &entry); err != nil {
+ return nil, err
+ }
+
+ return &entry, nil
+}
+
+func (b *FileBackend) Put(entry *Entry) error {
+ b.permitPool.Acquire()
+ defer b.permitPool.Release()
+
+ b.Lock()
+ defer b.Unlock()
+
+ return b.PutInternal(entry)
+}
+
+func (b *FileBackend) PutInternal(entry *Entry) error {
+ path, key := b.expandPath(entry.Key)
+
+ // Make the parent tree
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+
+ // JSON encode the entry and write it
+ f, err := os.OpenFile(
+ filepath.Join(path, key),
+ os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
+ 0600)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ enc := json.NewEncoder(f)
+ return enc.Encode(entry)
+}
+
+func (b *FileBackend) List(prefix string) ([]string, error) {
+ b.permitPool.Acquire()
+ defer b.permitPool.Release()
+
+ b.RLock()
+ defer b.RUnlock()
+
+ return b.ListInternal(prefix)
+}
+
+func (b *FileBackend) ListInternal(prefix string) ([]string, error) {
+ path := b.path
+ if prefix != "" {
+ path = filepath.Join(path, prefix)
+ }
+
+ // Read the directory contents
+ f, err := os.Open(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+
+ return nil, err
+ }
+ defer f.Close()
+
+ names, err := f.Readdirnames(-1)
+ if err != nil {
+ return nil, err
+ }
+
+ for i, name := range names {
+ if name[0] == '_' {
+ names[i] = name[1:]
+ } else {
+ names[i] = name + "/"
+ }
+ }
+
+ return names, nil
+}
+
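+// expandPath splits a logical key into its parent directory and an
+// underscore-prefixed file name; for example, with the backend path "/data"
+// the key "foo/bar" expands to ("/data/foo", "_bar").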
+func (b *FileBackend) expandPath(k string) (string, string) {
+ path := filepath.Join(b.path, k)
+ key := filepath.Base(path)
+ path = filepath.Dir(path)
+ return path, "_" + key
+}
+
+func (b *TransactionalFileBackend) Transaction(txns []TxnEntry) error {
+ b.permitPool.Acquire()
+ defer b.permitPool.Release()
+
+ b.Lock()
+ defer b.Unlock()
+
+ return genericTransactionHandler(b, txns)
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/file_test.go b/vendor/github.com/hashicorp/vault/physical/file_test.go
new file mode 100644
index 0000000..9810f4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/file_test.go
@@ -0,0 +1,152 @@
+package physical
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func TestFileBackend_Base64URLEncoding(t *testing.T) {
+ backendPath, err := ioutil.TempDir("", "vault")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer os.RemoveAll(backendPath)
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("file", logger, map[string]string{
+ "path": backendPath,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // List the entries. Length should be zero.
+ keys, err := b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 0 {
+ t.Fatalf("bad: len(keys): expected: 0, actual: %d", len(keys))
+ }
+
+ // Create a storage entry without base64 encoding the file name
+ rawFullPath := filepath.Join(backendPath, "_foo")
+ e := &Entry{Key: "foo", Value: []byte("test")}
+ f, err := os.OpenFile(
+ rawFullPath,
+ os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
+ 0600)
+ if err != nil {
+ t.Fatal(err)
+ }
+ json.NewEncoder(f).Encode(e)
+ f.Close()
+
+ // Get should work
+ out, err := b.Get("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, e) {
+ t.Fatalf("bad: %v expected: %v", out, e)
+ }
+
+ // List the entries. There should be one entry.
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 1 {
+ t.Fatalf("bad: len(keys): expected: 1, actual: %d", len(keys))
+ }
+
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // List the entries again. There should still be one entry.
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 1 {
+ t.Fatalf("bad: len(keys): expected: 1, actual: %d", len(keys))
+ }
+
+ // Get should work
+ out, err = b.Get("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, e) {
+ t.Fatalf("bad: %v expected: %v", out, e)
+ }
+
+ err = b.Delete("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ out, err = b.Get("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: entry: expected: nil, actual: %#v", e)
+ }
+
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 0 {
+ t.Fatalf("bad: len(keys): expected: 0, actual: %d", len(keys))
+ }
+
+ f, err = os.OpenFile(
+ rawFullPath,
+ os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
+ 0600)
+ if err != nil {
+ t.Fatal(err)
+ }
+ json.NewEncoder(f).Encode(e)
+ f.Close()
+
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 1 {
+ t.Fatalf("bad: len(keys): expected: 1, actual: %d", len(keys))
+ }
+}
+
+func TestFileBackend(t *testing.T) {
+ dir, err := ioutil.TempDir("", "vault")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer os.RemoveAll(dir)
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("file", logger, map[string]string{
+ "path": dir,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ testBackend(t, b)
+ testBackend_ListPrefix(t, b)
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/gcs.go b/vendor/github.com/hashicorp/vault/physical/gcs.go
new file mode 100644
index 0000000..e4d4187
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/gcs.go
@@ -0,0 +1,205 @@
+package physical
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/errwrap"
+ log "github.com/mgutz/logxi/v1"
+
+ "cloud.google.com/go/storage"
+ "github.com/armon/go-metrics"
+ "golang.org/x/net/context"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+)
+
+// GCSBackend is a physical backend that stores data
+// within a Google Cloud Storage bucket.
+type GCSBackend struct {
+ bucketName string
+ client *storage.Client
+ permitPool *PermitPool
+ logger log.Logger
+}
+
+// newGCSBackend constructs a Google Cloud Storage backend using a pre-existing
+// bucket. Credentials can be provided to the backend, sourced
+// from environment variables or a service account file
+func newGCSBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+
+ bucketName := os.Getenv("GOOGLE_STORAGE_BUCKET")
+
+ if bucketName == "" {
+ bucketName = conf["bucket"]
+ if bucketName == "" {
+ return nil, fmt.Errorf("env var GOOGLE_STORAGE_BUCKET or configuration parameter 'bucket' must be set")
+ }
+ }
+
+ // path to service account JSON file
+ credentialsFile := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
+ if credentialsFile == "" {
+ credentialsFile = conf["credentials_file"]
+ if credentialsFile == "" {
+ return nil, fmt.Errorf("env var GOOGLE_APPLICATION_CREDENTIALS or configuration parameter 'credentials_file' must be set")
+ }
+ }
+
+ client, err := storage.NewClient(
+ context.Background(),
+ option.WithServiceAccountFile(credentialsFile),
+ )
+
+ if err != nil {
+ return nil, fmt.Errorf("error establishing storage client: '%v'", err)
+ }
+
+ // check client connectivity by getting bucket attributes
+ _, err = client.Bucket(bucketName).Attrs(context.Background())
+ if err != nil {
+ return nil, fmt.Errorf("unable to access bucket '%s': '%v'", bucketName, err)
+ }
+
+ maxParStr, ok := conf["max_parallel"]
+ var maxParInt int
+ if ok {
+ maxParInt, err = strconv.Atoi(maxParStr)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ }
+ if logger.IsDebug() {
+ logger.Debug("physical/gcs: max_parallel set", "max_parallel", maxParInt)
+ }
+ }
+
+ g := GCSBackend{
+ bucketName: bucketName,
+ client: client,
+ permitPool: NewPermitPool(maxParInt),
+ logger: logger,
+ }
+
+ return &g, nil
+}
+
+// Put is used to insert or update an entry
+func (g *GCSBackend) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"gcs", "put"}, time.Now())
+
+ bucket := g.client.Bucket(g.bucketName)
+ writer := bucket.Object(entry.Key).NewWriter(context.Background())
+
+ g.permitPool.Acquire()
+ defer g.permitPool.Release()
+
+ // With GCS, an upload error frequently only surfaces on Close, so close
+ // the writer explicitly and propagate that error when the Write succeeded.
+ _, err := writer.Write(entry.Value)
+ if closeErr := writer.Close(); err == nil {
+ err = closeErr
+ }
+
+ return err
+}
+
+// Get is used to fetch an entry
+func (g *GCSBackend) Get(key string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"gcs", "get"}, time.Now())
+
+ bucket := g.client.Bucket(g.bucketName)
+ reader, err := bucket.Object(key).NewReader(context.Background())
+
+ // return (nil, nil) if object doesn't exist
+ if err == storage.ErrObjectNotExist {
+ return nil, nil
+ } else if err != nil {
+ return nil, fmt.Errorf("error creating bucket reader: '%v'", err)
+ }
+
+ g.permitPool.Acquire()
+ defer g.permitPool.Release()
+
+ defer reader.Close()
+ value, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return nil, fmt.Errorf("error reading object '%v': '%v'", key, err)
+ }
+
+ ent := Entry{
+ Key: key,
+ Value: value,
+ }
+
+ return &ent, nil
+}
+
+// Delete is used to permanently delete an entry
+func (g *GCSBackend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"gcs", "delete"}, time.Now())
+
+ bucket := g.client.Bucket(g.bucketName)
+
+ g.permitPool.Acquire()
+ defer g.permitPool.Release()
+
+ err := bucket.Object(key).Delete(context.Background())
+
+ // deletion of a non-existent object is OK
+ if err == storage.ErrObjectNotExist {
+ return nil
+ } else if err != nil {
+ return fmt.Errorf("error deleting object '%v': '%v'", key, err)
+ }
+
+ return nil
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (g *GCSBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"gcs", "list"}, time.Now())
+
+ bucket := g.client.Bucket(g.bucketName)
+
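+ // Query with Delimiter "/" so that objects under nested "directories"
+ // are rolled up into a single Prefix result, matching the one-level List
+ // semantics of the other backends.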
+ objectsIt := bucket.Objects(
+ context.Background(),
+ &storage.Query{
+ Prefix: prefix,
+ Delimiter: "/",
+ Versions: false,
+ })
+
+ keys := []string{}
+
+ g.permitPool.Acquire()
+ defer g.permitPool.Release()
+
+ for {
+ objAttrs, err := objectsIt.Next()
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error listing bucket '%v': '%v'", g.bucketName, err)
+ }
+
+ path := ""
+ if objAttrs.Prefix != "" {
+ // "subdirectory"
+ path = objAttrs.Prefix
+ } else {
+ // file
+ path = objAttrs.Name
+ }
+
+ // get relative file/dir just like "basename"
+ key := strings.TrimPrefix(path, prefix)
+ keys = append(keys, key)
+ }
+
+ sort.Strings(keys)
+
+ return keys, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/gcs_test.go b/vendor/github.com/hashicorp/vault/physical/gcs_test.go
new file mode 100644
index 0000000..23c4d3a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/gcs_test.go
@@ -0,0 +1,95 @@
+package physical
+
+import (
+ "fmt"
+ "math/rand"
+ "os"
+ "testing"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "cloud.google.com/go/storage"
+ "github.com/hashicorp/vault/helper/logformat"
+ "golang.org/x/net/context"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+)
+
+var ConsistencyDelays = delays{
+ beforeList: 5 * time.Second,
+ beforeGet: 0 * time.Second,
+}
+
+func TestGCSBackend(t *testing.T) {
+ credentialsFile := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
+
+ // projectID is only required for creating a bucket for this test
+ projectID := os.Getenv("GOOGLE_PROJECT_ID")
+
+ if credentialsFile == "" || projectID == "" {
+ t.SkipNow()
+ }
+
+ client, err := storage.NewClient(
+ context.Background(),
+ option.WithServiceAccountFile(credentialsFile),
+ )
+
+ if err != nil {
+ t.Fatalf("error creating storage client: '%v'", err)
+ }
+
+ var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+ bucketName := fmt.Sprintf("vault-gcs-testacc-%d", randInt)
+
+ bucket := client.Bucket(bucketName)
+ err = bucket.Create(context.Background(), projectID, nil)
+
+ if err != nil {
+ t.Fatalf("error creating bucket '%v': '%v'", bucketName, err)
+ }
+
+ // test bucket teardown
+ defer func() {
+ objectsIt := bucket.Objects(context.Background(), nil)
+
+ time.Sleep(ConsistencyDelays.beforeList)
+ // have to delete all objects before deleting bucket
+ for {
+ objAttrs, err := objectsIt.Next()
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ t.Fatalf("error listing bucket '%v' contents: '%v'", bucketName, err)
+ }
+
+ // Ignore errors in deleting a single object; we only care about deleting
+ // the bucket. Occasionally we get "storage: object doesn't exist", which
+ // is fine.
+ bucket.Object(objAttrs.Name).Delete(context.Background())
+ }
+
+ // Not a list operation, but Google lists to make sure the bucket is empty on delete.
+ time.Sleep(ConsistencyDelays.beforeList)
+ err := bucket.Delete(context.Background())
+ if err != nil {
+ t.Fatalf("error deleting bucket '%s': '%v'", bucketName, err)
+ }
+ }()
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("gcs", logger, map[string]string{
+ "bucket": bucketName,
+ "credentials_file": credentialsFile,
+ })
+
+ if err != nil {
+ t.Fatalf("error creating google cloud storage backend: '%s'", err)
+ }
+
+ testEventuallyConsistentBackend(t, b, ConsistencyDelays)
+ testEventuallyConsistentBackend_ListPrefix(t, b, ConsistencyDelays)
+
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem.go b/vendor/github.com/hashicorp/vault/physical/inmem.go
new file mode 100644
index 0000000..47f18eb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/inmem.go
@@ -0,0 +1,142 @@
+package physical
+
+import (
+ "strings"
+ "sync"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/armon/go-radix"
+)
+
+// InmemBackend is an in-memory only physical backend. It is useful
+// for testing and development situations where the data is not
+// expected to be durable.
+type InmemBackend struct {
+ sync.RWMutex
+ root *radix.Tree
+ permitPool *PermitPool
+ logger log.Logger
+}
+
+type TransactionalInmemBackend struct {
+ InmemBackend
+}
+
+// NewInmem constructs a new in-memory backend
+func NewInmem(logger log.Logger) *InmemBackend {
+ in := &InmemBackend{
+ root: radix.New(),
+ permitPool: NewPermitPool(DefaultParallelOperations),
+ logger: logger,
+ }
+ return in
+}
+
+// NewTransactionalInmem constructs a transactional in-memory backend with a
+// permit pool of size one, so only one operation can run at a time.
+func NewTransactionalInmem(logger log.Logger) *TransactionalInmemBackend {
+ in := &TransactionalInmemBackend{
+ InmemBackend: InmemBackend{
+ root: radix.New(),
+ permitPool: NewPermitPool(1),
+ logger: logger,
+ },
+ }
+ return in
+}
+
+// Put is used to insert or update an entry
+func (i *InmemBackend) Put(entry *Entry) error {
+ i.permitPool.Acquire()
+ defer i.permitPool.Release()
+
+ i.Lock()
+ defer i.Unlock()
+
+ return i.PutInternal(entry)
+}
+
+func (i *InmemBackend) PutInternal(entry *Entry) error {
+ i.root.Insert(entry.Key, entry)
+ return nil
+}
+
+// Get is used to fetch an entry
+func (i *InmemBackend) Get(key string) (*Entry, error) {
+ i.permitPool.Acquire()
+ defer i.permitPool.Release()
+
+ i.RLock()
+ defer i.RUnlock()
+
+ return i.GetInternal(key)
+}
+
+func (i *InmemBackend) GetInternal(key string) (*Entry, error) {
+ if raw, ok := i.root.Get(key); ok {
+ return raw.(*Entry), nil
+ }
+ return nil, nil
+}
+
+// Delete is used to permanently delete an entry
+func (i *InmemBackend) Delete(key string) error {
+ i.permitPool.Acquire()
+ defer i.permitPool.Release()
+
+ i.Lock()
+ defer i.Unlock()
+
+ return i.DeleteInternal(key)
+}
+
+func (i *InmemBackend) DeleteInternal(key string) error {
+ i.root.Delete(key)
+ return nil
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (i *InmemBackend) List(prefix string) ([]string, error) {
+ i.permitPool.Acquire()
+ defer i.permitPool.Release()
+
+ i.RLock()
+ defer i.RUnlock()
+
+ return i.ListInternal(prefix)
+}
+
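+// ListInternal walks the radix tree under the given prefix and reports its
+// immediate children: exact keys as-is and deeper keys collapsed to their
+// first path segment with a trailing slash, deduplicated via the seen map.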
+func (i *InmemBackend) ListInternal(prefix string) ([]string, error) {
+ var out []string
+ seen := make(map[string]interface{})
+ walkFn := func(s string, v interface{}) bool {
+ trimmed := strings.TrimPrefix(s, prefix)
+ sep := strings.Index(trimmed, "/")
+ if sep == -1 {
+ out = append(out, trimmed)
+ } else {
+ trimmed = trimmed[:sep+1]
+ if _, ok := seen[trimmed]; !ok {
+ out = append(out, trimmed)
+ seen[trimmed] = struct{}{}
+ }
+ }
+ return false
+ }
+ i.root.WalkPrefix(prefix, walkFn)
+
+ return out, nil
+}
+
+// Transaction implements the Transactional interface.
+func (t *TransactionalInmemBackend) Transaction(txns []TxnEntry) error {
+ t.permitPool.Acquire()
+ defer t.permitPool.Release()
+
+ t.Lock()
+ defer t.Unlock()
+
+ return genericTransactionHandler(t, txns)
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem_ha.go b/vendor/github.com/hashicorp/vault/physical/inmem_ha.go
new file mode 100644
index 0000000..bc691c5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/inmem_ha.go
@@ -0,0 +1,156 @@
+package physical
+
+import (
+ "fmt"
+ "sync"
+
+ log "github.com/mgutz/logxi/v1"
+)
+
+type InmemHABackend struct {
+ Backend
+ locks map[string]string
+ l sync.Mutex
+ cond *sync.Cond
+ logger log.Logger
+}
+
+type TransactionalInmemHABackend struct {
+ Transactional
+ InmemHABackend
+}
+
+// NewInmemHA constructs a new in-memory HA backend. This is only for testing.
+func NewInmemHA(logger log.Logger) *InmemHABackend {
+ in := &InmemHABackend{
+ Backend: NewInmem(logger),
+ locks: make(map[string]string),
+ logger: logger,
+ }
+ in.cond = sync.NewCond(&in.l)
+ return in
+}
+
+func NewTransactionalInmemHA(logger log.Logger) *TransactionalInmemHABackend {
+ transInmem := NewTransactionalInmem(logger)
+ inmemHA := InmemHABackend{
+ Backend: transInmem,
+ locks: make(map[string]string),
+ logger: logger,
+ }
+
+ in := &TransactionalInmemHABackend{
+ InmemHABackend: inmemHA,
+ Transactional: transInmem,
+ }
+ in.cond = sync.NewCond(&in.l)
+ return in
+}
+
+// LockWith is used for mutual exclusion based on the given key.
+func (i *InmemHABackend) LockWith(key, value string) (Lock, error) {
+ l := &InmemLock{
+ in: i,
+ key: key,
+ value: value,
+ }
+ return l, nil
+}
+
+// LockMapSize is used in some tests to determine whether this backend has ever
+// been used for HA purposes rather than simply for storage
+func (i *InmemHABackend) LockMapSize() int {
+ return len(i.locks)
+}
+
+// HAEnabled indicates whether the HA functionality should be exposed.
+// Currently always returns true.
+func (i *InmemHABackend) HAEnabled() bool {
+ return true
+}
+
+// InmemLock is an in-memory Lock implementation for the HABackend
+type InmemLock struct {
+ in *InmemHABackend
+ key string
+ value string
+
+ held bool
+ leaderCh chan struct{}
+ l sync.Mutex
+}
+
+func (i *InmemLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+ i.l.Lock()
+ defer i.l.Unlock()
+ if i.held {
+ return nil, fmt.Errorf("lock already held")
+ }
+
+ // Attempt an async acquisition
+ didLock := make(chan struct{})
+ releaseCh := make(chan bool, 1)
+ go func() {
+ // Wait to acquire the lock
+ i.in.l.Lock()
+ _, ok := i.in.locks[i.key]
+ for ok {
+ i.in.cond.Wait()
+ _, ok = i.in.locks[i.key]
+ }
+ i.in.locks[i.key] = i.value
+ i.in.l.Unlock()
+
+ // Signal that lock is held
+ close(didLock)
+
+ // Handle an early abort
+ release := <-releaseCh
+ if release {
+ i.in.l.Lock()
+ delete(i.in.locks, i.key)
+ i.in.l.Unlock()
+ i.in.cond.Broadcast()
+ }
+ }()
+
+ // Wait for lock acquisition or shutdown
+ select {
+ case <-didLock:
+ releaseCh <- false
+ case <-stopCh:
+ releaseCh <- true
+ return nil, nil
+ }
+
+ // Create the leader channel
+ i.held = true
+ i.leaderCh = make(chan struct{})
+ return i.leaderCh, nil
+}
+
+func (i *InmemLock) Unlock() error {
+ i.l.Lock()
+ defer i.l.Unlock()
+
+ if !i.held {
+ return nil
+ }
+
+ close(i.leaderCh)
+ i.leaderCh = nil
+ i.held = false
+
+ i.in.l.Lock()
+ delete(i.in.locks, i.key)
+ i.in.l.Unlock()
+ i.in.cond.Broadcast()
+ return nil
+}
+
+func (i *InmemLock) Value() (bool, string, error) {
+ i.in.l.Lock()
+ val, ok := i.in.locks[i.key]
+ i.in.l.Unlock()
+ return ok, val, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem_ha_test.go b/vendor/github.com/hashicorp/vault/physical/inmem_ha_test.go
new file mode 100644
index 0000000..102f85b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/inmem_ha_test.go
@@ -0,0 +1,15 @@
+package physical
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func TestInmemHA(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ inm := NewInmemHA(logger)
+ testHABackend(t, inm, inm)
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem_test.go b/vendor/github.com/hashicorp/vault/physical/inmem_test.go
new file mode 100644
index 0000000..7c3c788
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/inmem_test.go
@@ -0,0 +1,16 @@
+package physical
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func TestInmem(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ inm := NewInmem(logger)
+ testBackend(t, inm)
+ testBackend_ListPrefix(t, inm)
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/mssql.go b/vendor/github.com/hashicorp/vault/physical/mssql.go
new file mode 100644
index 0000000..25709a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/mssql.go
@@ -0,0 +1,216 @@
+package physical
+
+import (
+ "database/sql"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/armon/go-metrics"
+ _ "github.com/denisenkom/go-mssqldb"
+ log "github.com/mgutz/logxi/v1"
+)
+
+type MsSQLBackend struct {
+ dbTable string
+ client *sql.DB
+ statements map[string]*sql.Stmt
+ logger log.Logger
+}
+
+func newMsSQLBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+ username, ok := conf["username"]
+ if !ok {
+ username = ""
+ }
+
+ password, ok := conf["password"]
+ if !ok {
+ password = ""
+ }
+
+ server, ok := conf["server"]
+ if !ok || server == "" {
+ return nil, fmt.Errorf("missing server")
+ }
+
+ database, ok := conf["database"]
+ if !ok {
+ database = "Vault"
+ }
+
+ table, ok := conf["table"]
+ if !ok {
+ table = "Vault"
+ }
+
+ appname, ok := conf["appname"]
+ if !ok {
+ appname = "Vault"
+ }
+
+ connectionTimeout, ok := conf["connectiontimeout"]
+ if !ok {
+ connectionTimeout = "30"
+ }
+
+ logLevel, ok := conf["loglevel"]
+ if !ok {
+ logLevel = "0"
+ }
+
+ schema, ok := conf["schema"]
+ if !ok || schema == "" {
+ schema = "dbo"
+ }
+
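+ // Build a go-mssqldb connection string of the form
+ // "server=...;app name=Vault;connection timeout=30;log=0;user id=...;password=...".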
+ connectionString := fmt.Sprintf("server=%s;app name=%s;connection timeout=%s;log=%s", server, appname, connectionTimeout, logLevel)
+ if username != "" {
+ connectionString += ";user id=" + username
+ }
+
+ if password != "" {
+ connectionString += ";password=" + password
+ }
+
+ db, err := sql.Open("mssql", connectionString)
+ if err != nil {
+ return nil, fmt.Errorf("failed to connect to mssql: %v", err)
+ }
+
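+ // Database, schema, and table names are interpolated directly into the
+ // statements below; they come from operator-supplied configuration rather
+ // than user input, but they are not escaped, so keep them to simple
+ // identifiers.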
+ if _, err := db.Exec("IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = '" + database + "') CREATE DATABASE " + database); err != nil {
+ return nil, fmt.Errorf("failed to create mssql database: %v", err)
+ }
+
+ dbTable := database + "." + schema + "." + table
+ createQuery := "IF NOT EXISTS(SELECT 1 FROM " + database + ".INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_NAME='" + table + "' AND TABLE_SCHEMA='" + schema +
+ "') CREATE TABLE " + dbTable + " (Path VARCHAR(512) PRIMARY KEY, Value VARBINARY(MAX))"
+
+ if schema != "dbo" {
+ if _, err := db.Exec("USE " + database); err != nil {
+ return nil, fmt.Errorf("failed to switch mssql database: %v", err)
+ }
+
+ var num int
+ err = db.QueryRow("SELECT 1 FROM sys.schemas WHERE name = '" + schema + "'").Scan(&num)
+
+ switch {
+ case err == sql.ErrNoRows:
+ if _, err := db.Exec("CREATE SCHEMA " + schema); err != nil {
+ return nil, fmt.Errorf("failed to create mssql schema: %v", err)
+ }
+
+ case err != nil:
+ return nil, fmt.Errorf("failed to check if mssql schema exists: %v", err)
+ }
+ }
+
+ if _, err := db.Exec(createQuery); err != nil {
+ return nil, fmt.Errorf("failed to create mssql table: %v", err)
+ }
+
+ m := &MsSQLBackend{
+ dbTable: dbTable,
+ client: db,
+ statements: make(map[string]*sql.Stmt),
+ logger: logger,
+ }
+
+ statements := map[string]string{
+ "put": "IF EXISTS(SELECT 1 FROM " + dbTable + " WHERE Path = ?) UPDATE " + dbTable + " SET Value = ? WHERE Path = ?" +
+ " ELSE INSERT INTO " + dbTable + " VALUES(?, ?)",
+ "get": "SELECT Value FROM " + dbTable + " WHERE Path = ?",
+ "delete": "DELETE FROM " + dbTable + " WHERE Path = ?",
+ "list": "SELECT Path FROM " + dbTable + " WHERE Path LIKE ?",
+ }
+
+ for name, query := range statements {
+ if err := m.prepare(name, query); err != nil {
+ return nil, err
+ }
+ }
+
+ return m, nil
+}
+
+func (m *MsSQLBackend) prepare(name, query string) error {
+ stmt, err := m.client.Prepare(query)
+ if err != nil {
+ return fmt.Errorf("failed to prepare '%s': %v", name, err)
+ }
+
+ m.statements[name] = stmt
+
+ return nil
+}
+
+func (m *MsSQLBackend) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"mssql", "put"}, time.Now())
+
+ _, err := m.statements["put"].Exec(entry.Key, entry.Value, entry.Key, entry.Key, entry.Value)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *MsSQLBackend) Get(key string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"mssql", "get"}, time.Now())
+
+ var result []byte
+ err := m.statements["get"].QueryRow(key).Scan(&result)
+ if err == sql.ErrNoRows {
+ return nil, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ ent := &Entry{
+ Key: key,
+ Value: result,
+ }
+
+ return ent, nil
+}
+
+func (m *MsSQLBackend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"mssql", "delete"}, time.Now())
+
+ _, err := m.statements["delete"].Exec(key)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *MsSQLBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"mssql", "list"}, time.Now())
+
+ likePrefix := prefix + "%"
+ rows, err := m.statements["list"].Query(likePrefix)
+ if err != nil {
+ return nil, fmt.Errorf("failed to execute statement: %v", err)
+ }
+ defer rows.Close()
+
+ var keys []string
+ for rows.Next() {
+ var key string
+ err = rows.Scan(&key)
+ if err != nil {
+ return nil, fmt.Errorf("failed to scan rows: %v", err)
+ }
+
+ key = strings.TrimPrefix(key, prefix)
+ if i := strings.Index(key, "/"); i == -1 {
+ // Add objects only from the current 'folder'
+ keys = append(keys, key)
+ } else {
+ // Add truncated 'folder' paths
+ keys = appendIfMissing(keys, key[:i+1])
+ }
+ }
+
+ sort.Strings(keys)
+
+ return keys, nil
+}
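For reference, a minimal sketch of constructing the mssql backend above through NewBackend; the server address and credentials are placeholders, and any omitted keys fall back to the defaults in newMsSQLBackend ("Vault" database/table, "dbo" schema, 30-second connection timeout):

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/logformat"
	"github.com/hashicorp/vault/physical"
	log "github.com/mgutz/logxi/v1"
)

func main() {
	logger := logformat.NewVaultLogger(log.LevelInfo)

	// "server" is the only required key; the rest are placeholders.
	b, err := physical.NewBackend("mssql", logger, map[string]string{
		"server":   "sqlserver.example.com:1433",
		"username": "vault",
		"password": "secret",
	})
	if err != nil {
		panic(err)
	}

	// Round-trip one entry through the Backend interface.
	if err := b.Put(&physical.Entry{Key: "foo", Value: []byte("bar")}); err != nil {
		panic(err)
	}
	e, err := b.Get("foo")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(e.Value)) // bar
}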
diff --git a/vendor/github.com/hashicorp/vault/physical/mssql_test.go b/vendor/github.com/hashicorp/vault/physical/mssql_test.go
new file mode 100644
index 0000000..11f4684
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/mssql_test.go
@@ -0,0 +1,58 @@
+package physical
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+
+ _ "github.com/denisenkom/go-mssqldb"
+)
+
+func TestMsSQLBackend(t *testing.T) {
+ server := os.Getenv("MSSQL_SERVER")
+ if server == "" {
+ t.SkipNow()
+ }
+
+ database := os.Getenv("MSSQL_DB")
+ if database == "" {
+ database = "test"
+ }
+
+ table := os.Getenv("MSSQL_TABLE")
+ if table == "" {
+ table = "test"
+ }
+
+ username := os.Getenv("MSSQL_USERNAME")
+ password := os.Getenv("MSSQL_PASSWORD")
+
+ // Run vault tests
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("mssql", logger, map[string]string{
+ "server": server,
+ "database": database,
+ "table": table,
+ "username": username,
+ "password": password,
+ })
+
+ if err != nil {
+ t.Fatalf("Failed to create new backend: %v", err)
+ }
+
+ defer func() {
+ mssql := b.(*MsSQLBackend)
+ _, err := mssql.client.Exec("DROP TABLE " + mssql.dbTable)
+ if err != nil {
+ t.Fatalf("Failed to drop table: %v", err)
+ }
+ }()
+
+ testBackend(t, b)
+ testBackend_ListPrefix(t, b)
+
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/mysql.go b/vendor/github.com/hashicorp/vault/physical/mysql.go
new file mode 100644
index 0000000..ce13514
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/mysql.go
@@ -0,0 +1,225 @@
+package physical
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "database/sql"
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "sort"
+ "strings"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/armon/go-metrics"
+ mysql "github.com/go-sql-driver/mysql"
+)
+
+// mysqlTLSKey is an unreserved name for the registered TLS config;
+// the reserved values are "true", "false", and "skip-verify".
+const mysqlTLSKey = "default"
+
+// MySQLBackend is a physical backend that stores data
+// within MySQL database.
+type MySQLBackend struct {
+ dbTable string
+ client *sql.DB
+ statements map[string]*sql.Stmt
+ logger log.Logger
+}
+
+// newMySQLBackend constructs a MySQL backend using the given server address
+// and credentials for accessing the MySQL database.
+func newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+ // Get the MySQL credentials to perform read/write operations.
+ username, ok := conf["username"]
+ if !ok || username == "" {
+ return nil, fmt.Errorf("missing username")
+ }
+ password, ok := conf["password"]
+ if !ok || password == "" {
+ return nil, fmt.Errorf("missing password")
+ }
+
+ // Get or set the MySQL server address. Defaults to localhost and the default port (3306).
+ address, ok := conf["address"]
+ if !ok {
+ address = "127.0.0.1:3306"
+ }
+
+ // Get the MySQL database and table details.
+ database, ok := conf["database"]
+ if !ok {
+ database = "vault"
+ }
+ table, ok := conf["table"]
+ if !ok {
+ table = "vault"
+ }
+ dbTable := database + "." + table
+
+ dsnParams := url.Values{}
+ tlsCaFile, ok := conf["tls_ca_file"]
+ if ok {
+ if err := setupMySQLTLSConfig(tlsCaFile); err != nil {
+ return nil, fmt.Errorf("failed register TLS config: %v", err)
+ }
+
+ dsnParams.Add("tls", mysqlTLSKey)
+ }
+
+ // Create MySQL handle for the database.
+ dsn := username + ":" + password + "@tcp(" + address + ")/?" + dsnParams.Encode()
+ db, err := sql.Open("mysql", dsn)
+ if err != nil {
+ return nil, fmt.Errorf("failed to connect to mysql: %v", err)
+ }
+
+ // Create the required database if it doesn't exist.
+ if _, err := db.Exec("CREATE DATABASE IF NOT EXISTS " + database); err != nil {
+ return nil, fmt.Errorf("failed to create mysql database: %v", err)
+ }
+
+ // Create the required table if it doesn't exist.
+ create_query := "CREATE TABLE IF NOT EXISTS " + dbTable +
+ " (vault_key varbinary(512), vault_value mediumblob, PRIMARY KEY (vault_key))"
+ if _, err := db.Exec(create_query); err != nil {
+ return nil, fmt.Errorf("failed to create mysql table: %v", err)
+ }
+
+ // Setup the backend.
+ m := &MySQLBackend{
+ dbTable: dbTable,
+ client: db,
+ statements: make(map[string]*sql.Stmt),
+ logger: logger,
+ }
+
+ // Prepare all the statements required
+ statements := map[string]string{
+ "put": "INSERT INTO " + dbTable +
+ " VALUES( ?, ? ) ON DUPLICATE KEY UPDATE vault_value=VALUES(vault_value)",
+ "get": "SELECT vault_value FROM " + dbTable + " WHERE vault_key = ?",
+ "delete": "DELETE FROM " + dbTable + " WHERE vault_key = ?",
+ "list": "SELECT vault_key FROM " + dbTable + " WHERE vault_key LIKE ?",
+ }
+ for name, query := range statements {
+ if err := m.prepare(name, query); err != nil {
+ return nil, err
+ }
+ }
+ return m, nil
+}
+
+// prepare is a helper to prepare a query for future execution
+func (m *MySQLBackend) prepare(name, query string) error {
+ stmt, err := m.client.Prepare(query)
+ if err != nil {
+ return fmt.Errorf("failed to prepare '%s': %v", name, err)
+ }
+ m.statements[name] = stmt
+ return nil
+}
+
+// Put is used to insert or update an entry.
+func (m *MySQLBackend) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"mysql", "put"}, time.Now())
+
+ _, err := m.statements["put"].Exec(entry.Key, entry.Value)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Get is used to fetch an entry.
+func (m *MySQLBackend) Get(key string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"mysql", "get"}, time.Now())
+
+ var result []byte
+ err := m.statements["get"].QueryRow(key).Scan(&result)
+ if err == sql.ErrNoRows {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ ent := &Entry{
+ Key: key,
+ Value: result,
+ }
+ return ent, nil
+}
+
+// Delete is used to permanently delete an entry
+func (m *MySQLBackend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"mysql", "delete"}, time.Now())
+
+ _, err := m.statements["delete"].Exec(key)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (m *MySQLBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"mysql", "list"}, time.Now())
+
+ // Add the % wildcard to the prefix to do the prefix search
+ likePrefix := prefix + "%"
+ rows, err := m.statements["list"].Query(likePrefix)
+ if err != nil {
+ return nil, fmt.Errorf("failed to execute statement: %v", err)
+ }
+
+ var keys []string
+ for rows.Next() {
+ var key string
+ err = rows.Scan(&key)
+ if err != nil {
+ return nil, fmt.Errorf("failed to scan rows: %v", err)
+ }
+
+ key = strings.TrimPrefix(key, prefix)
+ if i := strings.Index(key, "/"); i == -1 {
+ // Add objects only from the current 'folder'
+ keys = append(keys, key)
+ } else {
+ // Add truncated 'folder' paths
+ keys = appendIfMissing(keys, string(key[:i+1]))
+ }
+ }
+
+ sort.Strings(keys)
+ return keys, nil
+}
+
+// setupMySQLTLSConfig registers a tls.Config for the given CA certificate
+// under the same key as the tls DSN parameter passed to sql.Open, e.g.
+// foo:bar@tcp(127.0.0.1:3306)/dbname?tls=default
+func setupMySQLTLSConfig(tlsCaFile string) error {
+ rootCertPool := x509.NewCertPool()
+
+ pem, err := ioutil.ReadFile(tlsCaFile)
+ if err != nil {
+ return err
+ }
+
+ if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+ return fmt.Errorf("failed to parse CA certificate from %q", tlsCaFile)
+ }
+
+ err = mysql.RegisterTLSConfig(mysqlTLSKey, &tls.Config{
+ RootCAs: rootCertPool,
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
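The tls=default mechanism above is standard go-sql-driver behavior rather than anything Vault-specific; a self-contained sketch of the same registration outside of newMySQLBackend (the CA path and DSN are placeholders):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"io/ioutil"
	"log"

	mysql "github.com/go-sql-driver/mysql"
)

func main() {
	// Mirror of setupMySQLTLSConfig: load a CA and register it under an
	// unreserved key so the DSN can select it with "?tls=default".
	pem, err := ioutil.ReadFile("/etc/ssl/mysql-ca.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		log.Fatal("failed to parse CA certificate")
	}
	if err := mysql.RegisterTLSConfig("default", &tls.Config{RootCAs: pool}); err != nil {
		log.Fatal(err)
	}

	// The "tls=default" parameter now selects the config registered above.
	db, err := sql.Open("mysql", "vault:secret@tcp(127.0.0.1:3306)/?tls=default")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}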
diff --git a/vendor/github.com/hashicorp/vault/physical/mysql_test.go b/vendor/github.com/hashicorp/vault/physical/mysql_test.go
new file mode 100644
index 0000000..1eabd9f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/mysql_test.go
@@ -0,0 +1,58 @@
+package physical
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+
+ _ "github.com/go-sql-driver/mysql"
+)
+
+func TestMySQLBackend(t *testing.T) {
+ address := os.Getenv("MYSQL_ADDR")
+ if address == "" {
+ t.SkipNow()
+ }
+
+ database := os.Getenv("MYSQL_DB")
+ if database == "" {
+ database = "test"
+ }
+
+ table := os.Getenv("MYSQL_TABLE")
+ if table == "" {
+ table = "test"
+ }
+
+ username := os.Getenv("MYSQL_USERNAME")
+ password := os.Getenv("MYSQL_PASSWORD")
+
+ // Run vault tests
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("mysql", logger, map[string]string{
+ "address": address,
+ "database": database,
+ "table": table,
+ "username": username,
+ "password": password,
+ })
+
+ if err != nil {
+ t.Fatalf("Failed to create new backend: %v", err)
+ }
+
+ defer func() {
+ mysql := b.(*MySQLBackend)
+ _, err := mysql.client.Exec("DROP TABLE " + mysql.dbTable)
+ if err != nil {
+ t.Fatalf("Failed to drop table: %v", err)
+ }
+ }()
+
+ testBackend(t, b)
+ testBackend_ListPrefix(t, b)
+
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/physical.go b/vendor/github.com/hashicorp/vault/physical/physical.go
new file mode 100644
index 0000000..b35d281
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/physical.go
@@ -0,0 +1,182 @@
+package physical
+
+import (
+ "fmt"
+ "sync"
+
+ log "github.com/mgutz/logxi/v1"
+)
+
+const DefaultParallelOperations = 128
+
+// Operation is the type of physical operation being performed.
+type Operation string
+
+const (
+ DeleteOperation Operation = "delete"
+ GetOperation Operation = "get"
+ ListOperation Operation = "list"
+ PutOperation Operation = "put"
+)
+
+// ShutdownChannel is closed to signal that background tasks should stop.
+type ShutdownChannel chan struct{}
+
+// Backend is the interface required for a physical
+// backend. A physical backend is used to durably store
+// data outside of Vault. As such, it is completely untrusted,
+// and is only accessed via a security barrier. The backends
+// must represent keys in a hierarchical manner. All methods
+// are expected to be thread safe.
+type Backend interface {
+ // Put is used to insert or update an entry
+ Put(entry *Entry) error
+
+ // Get is used to fetch an entry
+ Get(key string) (*Entry, error)
+
+ // Delete is used to permanently delete an entry
+ Delete(key string) error
+
+ // List is used to list all the keys under a given
+ // prefix, up to the next prefix.
+ List(prefix string) ([]string, error)
+}
+
+// HABackend is an extension to the standard physical
+// backend to support high-availability. Vault only expects to
+// use mutual exclusion to allow multiple instances to act as a
+// hot standby for a leader that services all requests.
+type HABackend interface {
+ // LockWith is used for mutual exclusion based on the given key.
+ LockWith(key, value string) (Lock, error)
+
+ // Whether or not HA functionality is enabled
+ HAEnabled() bool
+}
+
+// Purgable is an optional interface for backends that support
+// purging of their caches.
+type Purgable interface {
+ Purge()
+}
+
+// RedirectDetect is an optional interface that an HABackend
+// can implement. If it does, a redirect address can be automatically
+// detected.
+type RedirectDetect interface {
+ // DetectHostAddr is used to detect the host address
+ DetectHostAddr() (string, error)
+}
+
+// Callback signatures for RunServiceDiscovery
+type activeFunction func() bool
+type sealedFunction func() bool
+
+// ServiceDiscovery is an optional interface that an HABackend can implement.
+// If it does, the state of the backend is advertised to the service discovery
+// network.
+type ServiceDiscovery interface {
+ // NotifyActiveStateChange is used by Core to notify a backend
+ // capable of ServiceDiscovery that this Vault instance has changed
+ // its status to active or standby.
+ NotifyActiveStateChange() error
+
+ // NotifySealedStateChange is used by Core to notify a backend
+ // capable of ServiceDiscovery that Vault has changed its Sealed
+ // status to sealed or unsealed.
+ NotifySealedStateChange() error
+
+ // Run executes any background service discovery tasks until the
+ // shutdown channel is closed.
+ RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc activeFunction, sealedFunc sealedFunction) error
+}
+
+type Lock interface {
+ // Lock is used to acquire the given lock
+ // The stopCh is optional and if closed should interrupt the lock
+ // acquisition attempt. The return struct should be closed when
+ // leadership is lost.
+ Lock(stopCh <-chan struct{}) (<-chan struct{}, error)
+
+ // Unlock is used to release the lock
+ Unlock() error
+
+ // Value returns whether the lock is held and, if so, the value stored with it
+ Value() (bool, string, error)
+}
+
+// Entry is used to represent data stored by the physical backend
+type Entry struct {
+ Key string
+ Value []byte
+}
+
+// Factory is the factory function to create a physical backend.
+type Factory func(config map[string]string, logger log.Logger) (Backend, error)
+
+// NewBackend returns a new backend with the given type and configuration.
+// The backend is looked up in the builtinBackends variable.
+func NewBackend(t string, logger log.Logger, conf map[string]string) (Backend, error) {
+ f, ok := builtinBackends[t]
+ if !ok {
+ return nil, fmt.Errorf("unknown physical backend type: %s", t)
+ }
+ return f(conf, logger)
+}
+
+// builtinBackends is the list of built-in physical backends that can
+// be used with NewBackend.
+var builtinBackends = map[string]Factory{
+ "inmem": func(_ map[string]string, logger log.Logger) (Backend, error) {
+ return NewInmem(logger), nil
+ },
+ "inmem_transactional": func(_ map[string]string, logger log.Logger) (Backend, error) {
+ return NewTransactionalInmem(logger), nil
+ },
+ "inmem_ha": func(_ map[string]string, logger log.Logger) (Backend, error) {
+ return NewInmemHA(logger), nil
+ },
+ "inmem_transactional_ha": func(_ map[string]string, logger log.Logger) (Backend, error) {
+ return NewTransactionalInmemHA(logger), nil
+ },
+ "file_transactional": newTransactionalFileBackend,
+ "consul": newConsulBackend,
+ "zookeeper": newZookeeperBackend,
+ "file": newFileBackend,
+ "s3": newS3Backend,
+ "azure": newAzureBackend,
+ "dynamodb": newDynamoDBBackend,
+ "etcd": newEtcdBackend,
+ "mssql": newMsSQLBackend,
+ "mysql": newMySQLBackend,
+ "postgresql": newPostgreSQLBackend,
+ "swift": newSwiftBackend,
+ "gcs": newGCSBackend,
+}
+
+// PermitPool is used to limit maximum outstanding requests
+type PermitPool struct {
+ sem chan int
+}
+
+// NewPermitPool returns a new permit pool with the provided
+// number of permits
+func NewPermitPool(permits int) *PermitPool {
+ if permits < 1 {
+ permits = DefaultParallelOperations
+ }
+ return &PermitPool{
+ sem: make(chan int, permits),
+ }
+}
+
+// Acquire returns when a permit has been acquired
+func (c *PermitPool) Acquire() {
+ c.sem <- 1
+}
+
+// Release returns a permit to the pool
+func (c *PermitPool) Release() {
+ <-c.sem
+}
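PermitPool is a plain counting semaphore over a buffered channel; a short sketch of how a caller bounds concurrency with it (the permit count and workload are illustrative):

package main

import (
	"fmt"
	"sync"

	"github.com/hashicorp/vault/physical"
)

func main() {
	// Allow at most 2 concurrent operations; the remaining goroutines
	// block in Acquire until a permit is returned via Release.
	pool := physical.NewPermitPool(2)

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			pool.Acquire()
			defer pool.Release()
			fmt.Println("working:", n)
		}(i)
	}
	wg.Wait()
}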
diff --git a/vendor/github.com/hashicorp/vault/physical/physical_test.go b/vendor/github.com/hashicorp/vault/physical/physical_test.go
new file mode 100644
index 0000000..de1b9cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/physical_test.go
@@ -0,0 +1,636 @@
+package physical
+
+import (
+ "reflect"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func testNewBackend(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ _, err := NewBackend("foobar", logger, nil)
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ b, err := NewBackend("inmem", logger, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if b == nil {
+ t.Fatalf("expected backend")
+ }
+}
+
+func testBackend(t *testing.T, b Backend) {
+ // Should be empty
+ keys, err := b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 0 {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Delete should work if it does not exist
+ err = b.Delete("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get should fail
+ out, err := b.Get("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Make an entry
+ e := &Entry{Key: "foo", Value: []byte("test")}
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get should work
+ out, err = b.Get("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, e) {
+ t.Fatalf("bad: %v expected: %v", out, e)
+ }
+
+ // List should not be empty
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 1 {
+ t.Fatalf("bad: %v", keys)
+ }
+ if keys[0] != "foo" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Delete should work
+ err = b.Delete("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be empty
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 0 {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Get should fail
+ out, err = b.Get("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Multiple Puts should work; GH-189
+ e = &Entry{Key: "foo", Value: []byte("test")}
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ e = &Entry{Key: "foo", Value: []byte("test")}
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Make a nested entry
+ e = &Entry{Key: "foo/bar", Value: []byte("baz")}
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 2 {
+ t.Fatalf("bad: %v", keys)
+ }
+ sort.Strings(keys)
+ if keys[0] != "foo" || keys[1] != "foo/" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Delete with children should work
+ err = b.Delete("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get should return the child
+ out, err = b.Get("foo/bar")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("missing child")
+ }
+
+ // Removal of nested secret should not leave artifacts
+ e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")}
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ err = b.Delete("foo/nested1/nested2/nested3")
+ if err != nil {
+ t.Fatalf("failed to remove nested secret: %v", err)
+ }
+
+ keys, err = b.List("foo/")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(keys) != 1 {
+ t.Fatalf("there should be only one key left after deleting nested "+
+ "secret: %v", keys)
+ }
+
+ if keys[0] != "bar" {
+ t.Fatalf("bad keys after deleting nested: %v", keys)
+ }
+
+ // Make a second nested entry to test prefix removal
+ e = &Entry{Key: "foo/zip", Value: []byte("zap")}
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Delete should not remove the prefix
+ err = b.Delete("foo/bar")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 1 {
+ t.Fatalf("bad: %v", keys)
+ }
+ if keys[0] != "foo/" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Delete should remove the prefix
+ err = b.Delete("foo/zip")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 0 {
+ t.Fatalf("bad: %v", keys)
+ }
+}
+
+func testBackend_ListPrefix(t *testing.T, b Backend) {
+ e1 := &Entry{Key: "foo", Value: []byte("test")}
+ e2 := &Entry{Key: "foo/bar", Value: []byte("test")}
+ e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")}
+
+ defer func() {
+ b.Delete("foo")
+ b.Delete("foo/bar")
+ b.Delete("foo/bar/baz")
+ }()
+
+ err := b.Put(e1)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ err = b.Put(e2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ err = b.Put(e3)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Scan the root
+ keys, err := b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 2 {
+ t.Fatalf("bad: %v", keys)
+ }
+ sort.Strings(keys)
+ if keys[0] != "foo" {
+ t.Fatalf("bad: %v", keys)
+ }
+ if keys[1] != "foo/" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Scan foo/
+ keys, err = b.List("foo/")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 2 {
+ t.Fatalf("bad: %v", keys)
+ }
+ sort.Strings(keys)
+ if keys[0] != "bar" {
+ t.Fatalf("bad: %v", keys)
+ }
+ if keys[1] != "bar/" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Scan foo/bar/
+ keys, err = b.List("foo/bar/")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ sort.Strings(keys)
+ if len(keys) != 1 {
+ t.Fatalf("bad: %v", keys)
+ }
+ if keys[0] != "baz" {
+ t.Fatalf("bad: %v", keys)
+ }
+}
+
+func testHABackend(t *testing.T, b HABackend, b2 HABackend) {
+ // Get the lock
+ lock, err := b.LockWith("foo", "bar")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Attempt to lock
+ leaderCh, err := lock.Lock(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh == nil {
+ t.Fatalf("failed to get leader ch")
+ }
+
+ // Check the value
+ held, val, err := lock.Value()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !held {
+ t.Fatalf("should be held")
+ }
+ if val != "bar" {
+ t.Fatalf("bad value: %v", err)
+ }
+
+ // Second acquisition should fail
+ lock2, err := b2.LockWith("foo", "baz")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Cancel attempt in 50 msec
+ stopCh := make(chan struct{})
+ time.AfterFunc(50*time.Millisecond, func() {
+ close(stopCh)
+ })
+
+ // Attempt to lock
+ leaderCh2, err := lock2.Lock(stopCh)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh2 != nil {
+ t.Fatalf("should not get leader ch")
+ }
+
+ // Release the first lock
+ lock.Unlock()
+
+ // Attempt to lock should work
+ leaderCh2, err = lock2.Lock(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh2 == nil {
+ t.Fatalf("should get leader ch")
+ }
+
+ // Check the value
+ held, val, err = lock.Value()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !held {
+ t.Fatalf("should be held")
+ }
+ if val != "baz" {
+ t.Fatalf("bad value: %v", err)
+ }
+ // Cleanup
+ lock2.Unlock()
+}
+
+type delays struct {
+ beforeGet time.Duration
+ beforeList time.Duration
+}
+
+func testEventuallyConsistentBackend(t *testing.T, b Backend, d delays) {
+
+ // no delay required: nothing written to bucket
+ // Should be empty
+ keys, err := b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 0 {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Delete should work if it does not exist
+ err = b.Delete("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // no delay required: nothing written to bucket
+ // Get should fail
+ out, err := b.Get("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Make an entry
+ e := &Entry{Key: "foo", Value: []byte("test")}
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get should work
+ time.Sleep(d.beforeGet)
+ out, err = b.Get("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, e) {
+ t.Fatalf("bad: %v expected: %v", out, e)
+ }
+
+ // List should not be empty
+ time.Sleep(d.beforeList)
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 1 {
+ t.Fatalf("bad: %v", keys)
+ }
+ if keys[0] != "foo" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Delete should work
+ err = b.Delete("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be empty
+ time.Sleep(d.beforeList)
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 0 {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Get should fail
+ time.Sleep(d.beforeGet)
+ out, err = b.Get("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Multiple Puts should work; GH-189
+ e = &Entry{Key: "foo", Value: []byte("test")}
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ e = &Entry{Key: "foo", Value: []byte("test")}
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Make a nested entry
+ e = &Entry{Key: "foo/bar", Value: []byte("baz")}
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ time.Sleep(d.beforeList)
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 2 {
+ t.Fatalf("bad: %v", keys)
+ }
+ sort.Strings(keys)
+ if keys[0] != "foo" || keys[1] != "foo/" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Delete with children should work
+ err = b.Delete("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get should return the child
+ time.Sleep(d.beforeGet)
+ out, err = b.Get("foo/bar")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("missing child")
+ }
+
+ // Removal of nested secret should not leave artifacts
+ e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")}
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ err = b.Delete("foo/nested1/nested2/nested3")
+ if err != nil {
+ t.Fatalf("failed to remove nested secret: %v", err)
+ }
+
+ time.Sleep(d.beforeList)
+ keys, err = b.List("foo/")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(keys) != 1 {
+ t.Fatalf("there should be only one key left after deleting nested "+
+ "secret: %v", keys)
+ }
+
+ if keys[0] != "bar" {
+ t.Fatalf("bad keys after deleting nested: %v", keys)
+ }
+
+ // Make a second nested entry to test prefix removal
+ e = &Entry{Key: "foo/zip", Value: []byte("zap")}
+ err = b.Put(e)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Delete should not remove the prefix
+ err = b.Delete("foo/bar")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ time.Sleep(d.beforeList)
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 1 {
+ t.Fatalf("bad: %v", keys)
+ }
+ if keys[0] != "foo/" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Delete should remove the prefix
+ err = b.Delete("foo/zip")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ time.Sleep(d.beforeList)
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 0 {
+ t.Fatalf("bad: %v", keys)
+ }
+}
+
+func testEventuallyConsistentBackend_ListPrefix(t *testing.T, b Backend, d delays) {
+ e1 := &Entry{Key: "foo", Value: []byte("test")}
+ e2 := &Entry{Key: "foo/bar", Value: []byte("test")}
+ e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")}
+
+ err := b.Put(e1)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ err = b.Put(e2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ err = b.Put(e3)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Scan the root
+ time.Sleep(d.beforeList)
+ keys, err := b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 2 {
+ t.Fatalf("bad: %v", keys)
+ }
+ sort.Strings(keys)
+ if keys[0] != "foo" {
+ t.Fatalf("bad: %v", keys)
+ }
+ if keys[1] != "foo/" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Scan foo/
+ time.Sleep(d.beforeList)
+ keys, err = b.List("foo/")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 2 {
+ t.Fatalf("bad: %v", keys)
+ }
+ sort.Strings(keys)
+ if keys[0] != "bar" {
+ t.Fatalf("bad: %v", keys)
+ }
+ if keys[1] != "bar/" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Scan foo/bar/
+ time.Sleep(d.beforeList)
+ keys, err = b.List("foo/bar/")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ sort.Strings(keys)
+ if len(keys) != 1 {
+ t.Fatalf("bad: %v", keys)
+ }
+ if keys[0] != "baz" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+}
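The eventually-consistent variants are driven the same way as testBackend, just with explicit settle delays; a hypothetical wiring for a store that lags after writes (the delay values and the s3 config are assumptions, not tuned numbers from the source):

func TestMyLaggyBackend(t *testing.T) {
	logger := logformat.NewVaultLogger(log.LevelTrace)

	// Placeholder config: a bucket the test credentials can access.
	b, err := NewBackend("s3", logger, map[string]string{"bucket": "vault-test"})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Illustrative settle times for reads and listings after a write.
	d := delays{beforeGet: 3 * time.Second, beforeList: 5 * time.Second}
	testEventuallyConsistentBackend(t, b, d)
	testEventuallyConsistentBackend_ListPrefix(t, b, d)
}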
diff --git a/vendor/github.com/hashicorp/vault/physical/postgresql.go b/vendor/github.com/hashicorp/vault/physical/postgresql.go
new file mode 100644
index 0000000..2b11d48
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/postgresql.go
@@ -0,0 +1,177 @@
+package physical
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/armon/go-metrics"
+ "github.com/lib/pq"
+)
+
+// PostgreSQLBackend is a physical backend that stores data
+// within a PostgreSQL database.
+type PostgreSQLBackend struct {
+ table string
+ client *sql.DB
+ put_query string
+ get_query string
+ delete_query string
+ list_query string
+ logger log.Logger
+}
+
+// newPostgreSQLBackend constructs a PostgreSQL backend using the given
+// connection URL and table name.
+func newPostgreSQLBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+ // Get the PostgreSQL credentials to perform read/write operations.
+ connURL, ok := conf["connection_url"]
+ if !ok || connURL == "" {
+ return nil, fmt.Errorf("missing connection_url")
+ }
+
+ unquoted_table, ok := conf["table"]
+ if !ok {
+ unquoted_table = "vault_kv_store"
+ }
+ quoted_table := pq.QuoteIdentifier(unquoted_table)
+
+ // Create PostgreSQL handle for the database.
+ db, err := sql.Open("postgres", connURL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to connect to postgres: %v", err)
+ }
+
+ // Determine if we should use an upsert function (versions < 9.5)
+ var upsert_required bool
+ upsert_required_query := "SELECT string_to_array(setting, '.')::int[] < '{9,5}' FROM pg_settings WHERE name = 'server_version'"
+ if err := db.QueryRow(upsert_required_query).Scan(&upsert_required); err != nil {
+ return nil, fmt.Errorf("failed to check for native upsert: %v", err)
+ }
+
+ // Setup our put strategy based on the presence or absence of a native
+ // upsert.
+ var put_query string
+ if upsert_required {
+ put_query = "SELECT vault_kv_put($1, $2, $3, $4)"
+ } else {
+ put_query = "INSERT INTO " + quoted_table + " VALUES($1, $2, $3, $4)" +
+ " ON CONFLICT (path, key) DO " +
+ " UPDATE SET (parent_path, path, key, value) = ($1, $2, $3, $4)"
+ }
+
+ // Setup the backend.
+ m := &PostgreSQLBackend{
+ table: quoted_table,
+ client: db,
+ put_query: put_query,
+ get_query: "SELECT value FROM " + quoted_table + " WHERE path = $1 AND key = $2",
+ delete_query: "DELETE FROM " + quoted_table + " WHERE path = $1 AND key = $2",
+ list_query: "SELECT key FROM " + quoted_table + " WHERE path = $1" +
+ "UNION SELECT DISTINCT substring(substr(path, length($1)+1) from '^.*?/') FROM " +
+ quoted_table + " WHERE parent_path LIKE concat($1, '%')",
+ logger: logger,
+ }
+
+ return m, nil
+}
+
+// splitKey is a helper to split a full path key into individual
+// parts: parentPath, path, key
+func (m *PostgreSQLBackend) splitKey(fullPath string) (string, string, string) {
+ var parentPath string
+ var path string
+
+ pieces := strings.Split(fullPath, "/")
+ depth := len(pieces)
+ key := pieces[depth-1]
+
+ if depth == 1 {
+ parentPath = ""
+ path = "/"
+ } else if depth == 2 {
+ parentPath = "/"
+ path = "/" + pieces[0] + "/"
+ } else {
+ parentPath = "/" + strings.Join(pieces[:depth-2], "/") + "/"
+ path = "/" + strings.Join(pieces[:depth-1], "/") + "/"
+ }
+
+ return parentPath, path, key
+}
+
+// Put is used to insert or update an entry.
+func (m *PostgreSQLBackend) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"postgres", "put"}, time.Now())
+
+ parentPath, path, key := m.splitKey(entry.Key)
+
+ _, err := m.client.Exec(m.put_query, parentPath, path, key, entry.Value)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Get is used to fetch an entry.
+func (m *PostgreSQLBackend) Get(fullPath string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"postgres", "get"}, time.Now())
+
+ _, path, key := m.splitKey(fullPath)
+
+ var result []byte
+ err := m.client.QueryRow(m.get_query, path, key).Scan(&result)
+ if err == sql.ErrNoRows {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ ent := &Entry{
+ Key: key,
+ Value: result,
+ }
+ return ent, nil
+}
+
+// Delete is used to permanently delete an entry
+func (m *PostgreSQLBackend) Delete(fullPath string) error {
+ defer metrics.MeasureSince([]string{"postgres", "delete"}, time.Now())
+
+ _, path, key := m.splitKey(fullPath)
+
+ _, err := m.client.Exec(m.delete_query, path, key)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (m *PostgreSQLBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"postgres", "list"}, time.Now())
+
+ rows, err := m.client.Query(m.list_query, "/"+prefix)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ var keys []string
+ for rows.Next() {
+ var key string
+ err = rows.Scan(&key)
+ if err != nil {
+ return nil, fmt.Errorf("failed to scan rows: %v", err)
+ }
+
+ keys = append(keys, key)
+ }
+
+ return keys, nil
+}
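Since splitKey feeds all four queries, its mapping is worth pinning down; a hypothetical same-package sanity test covering the three depth cases:

func TestPostgreSQLSplitKey(t *testing.T) {
	m := &PostgreSQLBackend{} // splitKey uses no fields, so a zero value suffices
	cases := []struct{ full, parent, path, key string }{
		{"foo", "", "/", "foo"},
		{"foo/bar", "/", "/foo/", "bar"},
		{"foo/bar/baz", "/foo/", "/foo/bar/", "baz"},
	}
	for _, c := range cases {
		parent, path, key := m.splitKey(c.full)
		if parent != c.parent || path != c.path || key != c.key {
			t.Fatalf("splitKey(%q) = (%q, %q, %q)", c.full, parent, path, key)
		}
	}
}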
diff --git a/vendor/github.com/hashicorp/vault/physical/postgresql_test.go b/vendor/github.com/hashicorp/vault/physical/postgresql_test.go
new file mode 100644
index 0000000..5cdaaa0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/postgresql_test.go
@@ -0,0 +1,47 @@
+package physical
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+
+ _ "github.com/lib/pq"
+)
+
+func TestPostgreSQLBackend(t *testing.T) {
+ connURL := os.Getenv("PGURL")
+ if connURL == "" {
+ t.SkipNow()
+ }
+
+ table := os.Getenv("PGTABLE")
+ if table == "" {
+ table = "vault_kv_store"
+ }
+
+ // Run vault tests
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("postgresql", logger, map[string]string{
+ "connection_url": connURL,
+ "table": table,
+ })
+
+ if err != nil {
+ t.Fatalf("Failed to create new backend: %v", err)
+ }
+
+ defer func() {
+ pg := b.(*PostgreSQLBackend)
+ _, err := pg.client.Exec("TRUNCATE TABLE " + pg.table)
+ if err != nil {
+ t.Fatalf("Failed to drop table: %v", err)
+ }
+ }()
+
+ testBackend(t, b)
+ testBackend_ListPrefix(t, b)
+
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/s3.go b/vendor/github.com/hashicorp/vault/physical/s3.go
new file mode 100644
index 0000000..8271be7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/s3.go
@@ -0,0 +1,246 @@
+package physical
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/armon/go-metrics"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/hashicorp/errwrap"
+ cleanhttp "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/helper/awsutil"
+ "github.com/hashicorp/vault/helper/consts"
+)
+
+// S3Backend is a physical backend that stores data
+// within an S3 bucket.
+type S3Backend struct {
+ bucket string
+ client *s3.S3
+ logger log.Logger
+ permitPool *PermitPool
+}
+
+// newS3Backend constructs an S3 backend using a pre-existing
+// bucket. Credentials can be provided to the backend, sourced
+// from the environment, AWS credential files, or an IAM role.
+func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
+
+ bucket := os.Getenv("AWS_S3_BUCKET")
+ if bucket == "" {
+ bucket = conf["bucket"]
+ if bucket == "" {
+ return nil, fmt.Errorf("'bucket' must be set")
+ }
+ }
+
+ accessKey, ok := conf["access_key"]
+ if !ok {
+ accessKey = ""
+ }
+ secretKey, ok := conf["secret_key"]
+ if !ok {
+ secretKey = ""
+ }
+ sessionToken, ok := conf["session_token"]
+ if !ok {
+ sessionToken = ""
+ }
+ endpoint := os.Getenv("AWS_S3_ENDPOINT")
+ if endpoint == "" {
+ endpoint = conf["endpoint"]
+ }
+ region := os.Getenv("AWS_DEFAULT_REGION")
+ if region == "" {
+ region = conf["region"]
+ if region == "" {
+ region = "us-east-1"
+ }
+ }
+
+ credsConfig := &awsutil.CredentialsConfig{
+ AccessKey: accessKey,
+ SecretKey: secretKey,
+ SessionToken: sessionToken,
+ }
+ creds, err := credsConfig.GenerateCredentialChain()
+ if err != nil {
+ return nil, err
+ }
+
+ pooledTransport := cleanhttp.DefaultPooledTransport()
+ pooledTransport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
+
+ s3conn := s3.New(session.New(&aws.Config{
+ Credentials: creds,
+ HTTPClient: &http.Client{
+ Transport: pooledTransport,
+ },
+ Endpoint: aws.String(endpoint),
+ Region: aws.String(region),
+ }))
+
+ _, err = s3conn.HeadBucket(&s3.HeadBucketInput{Bucket: &bucket})
+ if err != nil {
+ return nil, fmt.Errorf("unable to access bucket '%s': %v", bucket, err)
+ }
+
+ maxParStr, ok := conf["max_parallel"]
+ var maxParInt int
+ if ok {
+ maxParInt, err = strconv.Atoi(maxParStr)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ }
+ if logger.IsDebug() {
+ logger.Debug("s3: max_parallel set", "max_parallel", maxParInt)
+ }
+ }
+
+ s := &S3Backend{
+ client: s3conn,
+ bucket: bucket,
+ logger: logger,
+ permitPool: NewPermitPool(maxParInt),
+ }
+ return s, nil
+}
+
+// Put is used to insert or update an entry
+func (s *S3Backend) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"s3", "put"}, time.Now())
+
+ s.permitPool.Acquire()
+ defer s.permitPool.Release()
+
+ _, err := s.client.PutObject(&s3.PutObjectInput{
+ Bucket: aws.String(s.bucket),
+ Key: aws.String(entry.Key),
+ Body: bytes.NewReader(entry.Value),
+ })
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Get is used to fetch an entry
+func (s *S3Backend) Get(key string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"s3", "get"}, time.Now())
+
+ s.permitPool.Acquire()
+ defer s.permitPool.Release()
+
+ resp, err := s.client.GetObject(&s3.GetObjectInput{
+ Bucket: aws.String(s.bucket),
+ Key: aws.String(key),
+ })
+ if awsErr, ok := err.(awserr.RequestFailure); ok {
+ // Return nil on 404s, error on anything else
+ if awsErr.StatusCode() == 404 {
+ return nil, nil
+ }
+ return nil, err
+ }
+ if err != nil {
+ return nil, err
+ }
+ if resp == nil {
+ return nil, fmt.Errorf("got nil response from S3 but no error")
+ }
+
+ data := make([]byte, *resp.ContentLength)
+ _, err = io.ReadFull(resp.Body, data)
+ if err != nil {
+ return nil, err
+ }
+
+ ent := &Entry{
+ Key: key,
+ Value: data,
+ }
+
+ return ent, nil
+}
+
+// Delete is used to permanently delete an entry
+func (s *S3Backend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"s3", "delete"}, time.Now())
+
+ s.permitPool.Acquire()
+ defer s.permitPool.Release()
+
+ _, err := s.client.DeleteObject(&s3.DeleteObjectInput{
+ Bucket: aws.String(s.bucket),
+ Key: aws.String(key),
+ })
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (s *S3Backend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"s3", "list"}, time.Now())
+
+ s.permitPool.Acquire()
+ defer s.permitPool.Release()
+
+ params := &s3.ListObjectsV2Input{
+ Bucket: aws.String(s.bucket),
+ Prefix: aws.String(prefix),
+ }
+
+ keys := []string{}
+
+ err := s.client.ListObjectsV2Pages(params,
+ func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+ for _, key := range page.Contents {
+ key := strings.TrimPrefix(*key.Key, prefix)
+
+ if i := strings.Index(key, "/"); i == -1 {
+ // Add objects only from the current 'folder'
+ keys = append(keys, key)
+ } else {
+ // Add truncated 'folder' paths
+ keys = appendIfMissing(keys, key[:i+1])
+ }
+ }
+ return true
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ sort.Strings(keys)
+
+ return keys, nil
+}
+
+func appendIfMissing(slice []string, i string) []string {
+ for _, ele := range slice {
+ if ele == i {
+ return slice
+ }
+ }
+ return append(slice, i)
+}
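The List implementations for s3, swift, mssql, and mysql all share the same folding step: trim the prefix, keep bare keys, and collapse anything deeper into a single truncated 'folder/' entry via appendIfMissing. A standalone sketch of just that step:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// fold mirrors the loop used by the List methods above.
func fold(fullKeys []string, prefix string) []string {
	var keys []string
	for _, k := range fullKeys {
		k = strings.TrimPrefix(k, prefix)
		if i := strings.Index(k, "/"); i == -1 {
			// Object directly in the current 'folder'
			keys = append(keys, k)
		} else {
			// Deeper objects collapse to one truncated 'folder/' entry
			dir := k[:i+1]
			seen := false
			for _, e := range keys {
				if e == dir {
					seen = true
					break
				}
			}
			if !seen {
				keys = append(keys, dir)
			}
		}
	}
	sort.Strings(keys)
	return keys
}

func main() {
	fmt.Println(fold([]string{"foo/a", "foo/b/c", "foo/b/d"}, "foo/"))
	// Output: [a b/]
}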
diff --git a/vendor/github.com/hashicorp/vault/physical/s3_test.go b/vendor/github.com/hashicorp/vault/physical/s3_test.go
new file mode 100644
index 0000000..8fdb882
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/s3_test.go
@@ -0,0 +1,93 @@
+package physical
+
+import (
+ "fmt"
+ "math/rand"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+func TestS3Backend(t *testing.T) {
+ if os.Getenv("AWS_ACCESS_KEY_ID") == "" || os.Getenv("AWS_SECRET_ACCESS_KEY") == "" {
+ t.SkipNow()
+ }
+
+ creds, err := credentials.NewEnvCredentials().Get()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // If the variable is empty or doesn't exist, the default
+ // AWS endpoints will be used
+ endpoint := os.Getenv("AWS_S3_ENDPOINT")
+
+ region := os.Getenv("AWS_DEFAULT_REGION")
+ if region == "" {
+ region = "us-east-1"
+ }
+
+ s3conn := s3.New(session.New(&aws.Config{
+ Credentials: credentials.NewEnvCredentials(),
+ Endpoint: aws.String(endpoint),
+ Region: aws.String(region),
+ }))
+
+ var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+ bucket := fmt.Sprintf("vault-s3-testacc-%d", randInt)
+
+ _, err = s3conn.CreateBucket(&s3.CreateBucketInput{
+ Bucket: aws.String(bucket),
+ })
+ if err != nil {
+ t.Fatalf("unable to create test bucket: %s", err)
+ }
+
+ defer func() {
+ // Gotta list all the objects and delete them
+ // before being able to delete the bucket
+ listResp, _ := s3conn.ListObjects(&s3.ListObjectsInput{
+ Bucket: aws.String(bucket),
+ })
+
+ objects := &s3.Delete{}
+ for _, key := range listResp.Contents {
+ oi := &s3.ObjectIdentifier{Key: key.Key}
+ objects.Objects = append(objects.Objects, oi)
+ }
+
+ s3conn.DeleteObjects(&s3.DeleteObjectsInput{
+ Bucket: aws.String(bucket),
+ Delete: objects,
+ })
+
+ _, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{Bucket: aws.String(bucket)})
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ }()
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("s3", logger, map[string]string{
+ "access_key": creds.AccessKeyID,
+ "secret_key": creds.SecretAccessKey,
+ "session_token": creds.SessionToken,
+ "bucket": bucket,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ testBackend(t, b)
+ testBackend_ListPrefix(t, b)
+
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/swift.go b/vendor/github.com/hashicorp/vault/physical/swift.go
new file mode 100644
index 0000000..0ed4fe6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/swift.go
@@ -0,0 +1,217 @@
+package physical
+
+import (
+ "fmt"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/armon/go-metrics"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/ncw/swift"
+)
+
+// SwiftBackend is a physical backend that stores data
+// within an OpenStack Swift container.
+type SwiftBackend struct {
+ container string
+ client *swift.Connection
+ logger log.Logger
+ permitPool *PermitPool
+}
+
+// newSwiftBackend constructs a Swift backend using a pre-existing
+// container. Credentials can be provided to the backend, sourced
+// from the environment.
+func newSwiftBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+
+ username := os.Getenv("OS_USERNAME")
+ if username == "" {
+ username = conf["username"]
+ if username == "" {
+ return nil, fmt.Errorf("missing username")
+ }
+ }
+ password := os.Getenv("OS_PASSWORD")
+ if password == "" {
+ password = conf["password"]
+ if password == "" {
+ return nil, fmt.Errorf("missing password")
+ }
+ }
+ authUrl := os.Getenv("OS_AUTH_URL")
+ if authUrl == "" {
+ authUrl = conf["auth_url"]
+ if authUrl == "" {
+ return nil, fmt.Errorf("missing auth_url")
+ }
+ }
+ container := os.Getenv("OS_CONTAINER")
+ if container == "" {
+ container = conf["container"]
+ if container == "" {
+ return nil, fmt.Errorf("missing container")
+ }
+ }
+ project := os.Getenv("OS_PROJECT_NAME")
+ if project == "" {
+ project = conf["project"]
+
+ if project == "" {
+ // Check for KeyStone naming prior to V3
+ project := os.Getenv("OS_TENANT_NAME")
+ if project == "" {
+ project = conf["tenant"]
+ }
+ }
+ }
+
+ domain := os.Getenv("OS_USER_DOMAIN_NAME")
+ if domain == "" {
+ domain = conf["domain"]
+ }
+ projectDomain := os.Getenv("OS_PROJECT_DOMAIN_NAME")
+ if projectDomain == "" {
+ projectDomain = conf["project-domain"]
+ }
+
+ c := swift.Connection{
+ Domain: domain,
+ UserName: username,
+ ApiKey: password,
+ AuthUrl: authUrl,
+ Tenant: project,
+ TenantDomain: projectDomain,
+ Transport: cleanhttp.DefaultPooledTransport(),
+ }
+
+ err := c.Authenticate()
+ if err != nil {
+ return nil, err
+ }
+
+ _, _, err = c.Container(container)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to access container '%s': %v", container, err)
+ }
+
+ maxParStr, ok := conf["max_parallel"]
+ var maxParInt int
+ if ok {
+ maxParInt, err = strconv.Atoi(maxParStr)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ }
+ if logger.IsDebug() {
+ logger.Debug("swift: max_parallel set", "max_parallel", maxParInt)
+ }
+ }
+
+ s := &SwiftBackend{
+ client: &c,
+ container: container,
+ logger: logger,
+ permitPool: NewPermitPool(maxParInt),
+ }
+ return s, nil
+}
+
+// Put is used to insert or update an entry
+func (s *SwiftBackend) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"swift", "put"}, time.Now())
+
+ s.permitPool.Acquire()
+ defer s.permitPool.Release()
+
+ err := s.client.ObjectPutBytes(s.container, entry.Key, entry.Value, "")
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Get is used to fetch an entry
+func (s *SwiftBackend) Get(key string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"swift", "get"}, time.Now())
+
+ s.permitPool.Acquire()
+ defer s.permitPool.Release()
+
+ // List object names with the key first: under eventual consistency the
+ // object may already be deleted, yet a node could still serve stale bytes
+ // on a direct read, which would fail the physical tests
+ list, err := s.client.ObjectNames(s.container, &swift.ObjectsOpts{Prefix: key})
+ if err != nil {
+ return nil, err
+ }
+ if len(list) == 0 {
+ return nil, nil
+ }
+ data, err := s.client.ObjectGetBytes(s.container, key)
+ if err == swift.ObjectNotFound {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ ent := &Entry{
+ Key: key,
+ Value: data,
+ }
+
+ return ent, nil
+}
+
+// Delete is used to permanently delete an entry
+func (s *SwiftBackend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"swift", "delete"}, time.Now())
+
+ s.permitPool.Acquire()
+ defer s.permitPool.Release()
+
+ err := s.client.ObjectDelete(s.container, key)
+
+ if err != nil && err != swift.ObjectNotFound {
+ return err
+ }
+
+ return nil
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (s *SwiftBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"swift", "list"}, time.Now())
+
+ s.permitPool.Acquire()
+ defer s.permitPool.Release()
+
+ list, err := s.client.ObjectNamesAll(s.container, &swift.ObjectsOpts{Prefix: prefix})
+ if err != nil {
+ return nil, err
+ }
+
+ keys := []string{}
+ for _, key := range list {
+ key := strings.TrimPrefix(key, prefix)
+
+ if i := strings.Index(key, "/"); i == -1 {
+ // Add objects only from the current 'folder'
+ keys = append(keys, key)
+ } else {
+ // Add truncated 'folder' paths
+ keys = appendIfMissing(keys, key[:i+1])
+ }
+ }
+
+ sort.Strings(keys)
+
+ return keys, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/swift_test.go b/vendor/github.com/hashicorp/vault/physical/swift_test.go
new file mode 100644
index 0000000..2da37f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/swift_test.go
@@ -0,0 +1,85 @@
+package physical
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/ncw/swift"
+)
+
+func TestSwiftBackend(t *testing.T) {
+ if os.Getenv("OS_USERNAME") == "" || os.Getenv("OS_PASSWORD") == "" ||
+ os.Getenv("OS_AUTH_URL") == "" {
+ t.SkipNow()
+ }
+ username := os.Getenv("OS_USERNAME")
+ password := os.Getenv("OS_PASSWORD")
+ authUrl := os.Getenv("OS_AUTH_URL")
+ project := os.Getenv("OS_PROJECT_NAME")
+ domain := os.Getenv("OS_USER_DOMAIN_NAME")
+ projectDomain := os.Getenv("OS_PROJECT_DOMAIN_NAME")
+
+ ts := time.Now().UnixNano()
+ container := fmt.Sprintf("vault-test-%d", ts)
+
+ cleaner := swift.Connection{
+ Domain: domain,
+ UserName: username,
+ ApiKey: password,
+ AuthUrl: authUrl,
+ Tenant: project,
+ TenantDomain: projectDomain,
+ Transport: cleanhttp.DefaultPooledTransport(),
+ }
+
+ err := cleaner.Authenticate()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = cleaner.ContainerCreate(container, nil)
+ if nil != err {
+ t.Fatalf("Unable to create test container '%s': %v", container, err)
+ }
+ defer func() {
+ newObjects, err := cleaner.ObjectNamesAll(container, nil)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ for _, o := range newObjects {
+ err := cleaner.ObjectDelete(container, o)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ }
+ err = cleaner.ContainerDelete(container)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ }()
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("swift", logger, map[string]string{
+ "username": username,
+ "password": password,
+ "container": container,
+ "auth_url": authUrl,
+ "project": project,
+ "domain": domain,
+ "project-domain": projectDomain,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ testBackend(t, b)
+ testBackend_ListPrefix(t, b)
+
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/transactions.go b/vendor/github.com/hashicorp/vault/physical/transactions.go
new file mode 100644
index 0000000..b9ddffa
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/transactions.go
@@ -0,0 +1,121 @@
+package physical
+
+import multierror "github.com/hashicorp/go-multierror"
+
+// TxnEntry is an operation that is applied atomically as part of
+// a transactional update. Only supported by Transactional backends.
+type TxnEntry struct {
+ Operation Operation
+ Entry *Entry
+}
+
+// Transactional is an optional interface for backends that
+// support doing transactional updates of multiple keys. This is
+// required for some features such as replication.
+type Transactional interface {
+ // The function to run a transaction
+ Transaction([]TxnEntry) error
+}
+
+// PseudoTransactional is implemented by backends whose internal functions
+// do no locking or permit pool acquisition. Depending on the backend and
+// whether it natively supports transactions, these may simply chain to the
+// normal backend functions.
+type PseudoTransactional interface {
+ GetInternal(string) (*Entry, error)
+ PutInternal(*Entry) error
+ DeleteInternal(string) error
+}
+
+// Implements the transaction interface
+func genericTransactionHandler(t PseudoTransactional, txns []TxnEntry) (retErr error) {
+ rollbackStack := make([]TxnEntry, 0, len(txns))
+ var dirty bool
+
+ // We walk the transactions in order; each successful operation goes into a
+ // LIFO for rollback if we hit an error along the way
+TxnWalk:
+ for _, txn := range txns {
+ switch txn.Operation {
+ case DeleteOperation:
+ entry, err := t.GetInternal(txn.Entry.Key)
+ if err != nil {
+ retErr = multierror.Append(retErr, err)
+ dirty = true
+ break TxnWalk
+ }
+ if entry == nil {
+ // Nothing to delete or roll back
+ continue
+ }
+ rollbackEntry := TxnEntry{
+ Operation: PutOperation,
+ Entry: &Entry{
+ Key: entry.Key,
+ Value: entry.Value,
+ },
+ }
+ err = t.DeleteInternal(txn.Entry.Key)
+ if err != nil {
+ retErr = multierror.Append(retErr, err)
+ dirty = true
+ break TxnWalk
+ }
+ rollbackStack = append([]TxnEntry{rollbackEntry}, rollbackStack...)
+
+ case PutOperation:
+ entry, err := t.GetInternal(txn.Entry.Key)
+ if err != nil {
+ retErr = multierror.Append(retErr, err)
+ dirty = true
+ break TxnWalk
+ }
+ // Nothing existed so in fact rolling back requires a delete
+ var rollbackEntry TxnEntry
+ if entry == nil {
+ rollbackEntry = TxnEntry{
+ Operation: DeleteOperation,
+ Entry: &Entry{
+ Key: txn.Entry.Key,
+ },
+ }
+ } else {
+ rollbackEntry = TxnEntry{
+ Operation: PutOperation,
+ Entry: &Entry{
+ Key: entry.Key,
+ Value: entry.Value,
+ },
+ }
+ }
+ err = t.PutInternal(txn.Entry)
+ if err != nil {
+ retErr = multierror.Append(retErr, err)
+ dirty = true
+ break TxnWalk
+ }
+ rollbackStack = append([]TxnEntry{rollbackEntry}, rollbackStack...)
+ }
+ }
+
+ // Need to roll back because we hit an error along the way
+ if dirty {
+ // While traversing this, if we get an error, we continue anyways in
+ // best-effort fashion
+ for _, txn := range rollbackStack {
+ switch txn.Operation {
+ case DeleteOperation:
+ err := t.DeleteInternal(txn.Entry.Key)
+ if err != nil {
+ retErr = multierror.Append(retErr, err)
+ }
+ case PutOperation:
+ err := t.PutInternal(txn.Entry)
+ if err != nil {
+ retErr = multierror.Append(retErr, err)
+ }
+ }
+ }
+ }
+
+ return
+}
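Putting TxnEntry and the rollback stack together, a sketch of the intended call pattern; applyAtomically is a hypothetical same-package helper, not a function from the source:

func applyAtomically(b Backend) error {
	txns := []TxnEntry{
		{Operation: PutOperation, Entry: &Entry{Key: "foo", Value: []byte("bar2")}},
		{Operation: DeleteOperation, Entry: &Entry{Key: "deleteme"}},
		{Operation: PutOperation, Entry: &Entry{Key: "zip", Value: []byte("zap2")}},
	}
	tb, ok := b.(Transactional)
	if !ok {
		return fmt.Errorf("backend %T does not support transactions", b)
	}
	// On a mid-transaction failure the handler replays its rollback stack
	// in LIFO order: if the final put fails, "deleteme" is first re-put
	// with its old value and then "foo" is restored, leaving the store
	// exactly as it started.
	return tb.Transaction(txns)
}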
diff --git a/vendor/github.com/hashicorp/vault/physical/transactions_test.go b/vendor/github.com/hashicorp/vault/physical/transactions_test.go
new file mode 100644
index 0000000..e365a95
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/transactions_test.go
@@ -0,0 +1,254 @@
+package physical
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "testing"
+
+ radix "github.com/armon/go-radix"
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+)
+
+type faultyPseudo struct {
+ underlying InmemBackend
+ faultyPaths map[string]struct{}
+}
+
+func (f *faultyPseudo) Get(key string) (*Entry, error) {
+ return f.underlying.Get(key)
+}
+
+func (f *faultyPseudo) Put(entry *Entry) error {
+ return f.underlying.Put(entry)
+}
+
+func (f *faultyPseudo) Delete(key string) error {
+ return f.underlying.Delete(key)
+}
+
+func (f *faultyPseudo) GetInternal(key string) (*Entry, error) {
+ if _, ok := f.faultyPaths[key]; ok {
+ return nil, fmt.Errorf("fault")
+ }
+ return f.underlying.GetInternal(key)
+}
+
+func (f *faultyPseudo) PutInternal(entry *Entry) error {
+ if _, ok := f.faultyPaths[entry.Key]; ok {
+ return fmt.Errorf("fault")
+ }
+ return f.underlying.PutInternal(entry)
+}
+
+func (f *faultyPseudo) DeleteInternal(key string) error {
+ if _, ok := f.faultyPaths[key]; ok {
+ return fmt.Errorf("fault")
+ }
+ return f.underlying.DeleteInternal(key)
+}
+
+func (f *faultyPseudo) List(prefix string) ([]string, error) {
+ return f.underlying.List(prefix)
+}
+
+func (f *faultyPseudo) Transaction(txns []TxnEntry) error {
+ f.underlying.permitPool.Acquire()
+ defer f.underlying.permitPool.Release()
+
+ f.underlying.Lock()
+ defer f.underlying.Unlock()
+
+ return genericTransactionHandler(f, txns)
+}
+
+func newFaultyPseudo(logger log.Logger, faultyPaths []string) *faultyPseudo {
+ out := &faultyPseudo{
+ underlying: InmemBackend{
+ root: radix.New(),
+ permitPool: NewPermitPool(1),
+ logger: logger,
+ },
+ faultyPaths: make(map[string]struct{}, len(faultyPaths)),
+ }
+ for _, v := range faultyPaths {
+ out.faultyPaths[v] = struct{}{}
+ }
+ return out
+}
+
+func TestPseudo_Basic(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ p := newFaultyPseudo(logger, nil)
+ testBackend(t, p)
+ testBackend_ListPrefix(t, p)
+}
+
+func TestPseudo_SuccessfulTransaction(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ p := newFaultyPseudo(logger, nil)
+
+ txns := setupPseudo(p, t)
+
+ if err := p.Transaction(txns); err != nil {
+ t.Fatal(err)
+ }
+
+ keys, err := p.List("")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected := []string{"foo", "zip"}
+
+ sort.Strings(keys)
+ sort.Strings(expected)
+ if !reflect.DeepEqual(keys, expected) {
+ t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys)
+ }
+
+ entry, err := p.Get("foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ t.Fatal("got nil entry")
+ }
+ if entry.Value == nil {
+ t.Fatal("got nil value")
+ }
+ if string(entry.Value) != "bar3" {
+ t.Fatal("updates did not apply correctly")
+ }
+
+ entry, err = p.Get("zip")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ t.Fatal("got nil entry")
+ }
+ if entry.Value == nil {
+ t.Fatal("got nil value")
+ }
+ if string(entry.Value) != "zap3" {
+ t.Fatal("updates did not apply correctly")
+ }
+}
+
+func TestPseudo_FailedTransaction(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ p := newFaultyPseudo(logger, []string{"zip"})
+
+ txns := setupPseudo(p, t)
+
+ if err := p.Transaction(txns); err == nil {
+ t.Fatal("expected error during transaction")
+ }
+
+ keys, err := p.List("")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected := []string{"foo", "zip", "deleteme", "deleteme2"}
+
+ sort.Strings(keys)
+ sort.Strings(expected)
+ if !reflect.DeepEqual(keys, expected) {
+ t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys)
+ }
+
+ entry, err := p.Get("foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ t.Fatal("got nil entry")
+ }
+ if entry.Value == nil {
+ t.Fatal("got nil value")
+ }
+ if string(entry.Value) != "bar" {
+ t.Fatal("values did not rollback correctly")
+ }
+
+ entry, err = p.Get("zip")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ t.Fatal("got nil entry")
+ }
+ if entry.Value == nil {
+ t.Fatal("got nil value")
+ }
+ if string(entry.Value) != "zap" {
+ t.Fatal("values did not rollback correctly")
+ }
+}
+
+func setupPseudo(p *faultyPseudo, t *testing.T) []TxnEntry {
+ // Add a few keys so that we test rollback with deletion
+ if err := p.Put(&Entry{
+ Key: "foo",
+ Value: []byte("bar"),
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := p.Put(&Entry{
+ Key: "zip",
+ Value: []byte("zap"),
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := p.Put(&Entry{
+ Key: "deleteme",
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := p.Put(&Entry{
+ Key: "deleteme2",
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ txns := []TxnEntry{
+ TxnEntry{
+ Operation: PutOperation,
+ Entry: &Entry{
+ Key: "foo",
+ Value: []byte("bar2"),
+ },
+ },
+ TxnEntry{
+ Operation: DeleteOperation,
+ Entry: &Entry{
+ Key: "deleteme",
+ },
+ },
+ TxnEntry{
+ Operation: PutOperation,
+ Entry: &Entry{
+ Key: "foo",
+ Value: []byte("bar3"),
+ },
+ },
+ TxnEntry{
+ Operation: DeleteOperation,
+ Entry: &Entry{
+ Key: "deleteme2",
+ },
+ },
+ TxnEntry{
+ Operation: PutOperation,
+ Entry: &Entry{
+ Key: "zip",
+ Value: []byte("zap3"),
+ },
+ },
+ }
+
+ return txns
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/zookeeper.go b/vendor/github.com/hashicorp/vault/physical/zookeeper.go
new file mode 100644
index 0000000..6bc9061
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/zookeeper.go
@@ -0,0 +1,445 @@
+package physical
+
+import (
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ metrics "github.com/armon/go-metrics"
+ "github.com/samuel/go-zookeeper/zk"
+)
+
+const (
+ // ZKNodeFilePrefix is prefixed to any "files" in ZooKeeper,
+ // so that they do not collide with directory entries. Otherwise,
+ // we cannot delete a file if the path is a full-prefix of another
+ // key.
+ ZKNodeFilePrefix = "_"
+)
+
+// ZookeeperBackend is a physical backend that stores data at a specific
+// prefix within Zookeeper. It is used in production situations as
+// it allows Vault to run on multiple machines in a highly-available manner.
+type ZookeeperBackend struct {
+ path string
+ client *zk.Conn
+ acl []zk.ACL
+ logger log.Logger
+}
+
+// newZookeeperBackend constructs a Zookeeper backend from the given
+// configuration, storing data under the configured path prefix.
+func newZookeeperBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+ // Get the path in Zookeeper
+ path, ok := conf["path"]
+ if !ok {
+ path = "vault/"
+ }
+
+ // Ensure path is suffixed and prefixed (zk requires prefix /)
+ if !strings.HasSuffix(path, "/") {
+ path += "/"
+ }
+ if !strings.HasPrefix(path, "/") {
+ path = "/" + path
+ }
+
+ // Configure the client, default to localhost instance
+ var machines string
+ machines, ok = conf["address"]
+ if !ok {
+ machines = "localhost:2181"
+ }
+
+ // zNode owner and schema.
+ var owner string
+ var schema string
+ var schemaAndOwner string
+ schemaAndOwner, ok = conf["znode_owner"]
+ if !ok {
+ owner = "anyone"
+ schema = "world"
+ } else {
+ parsedSchemaAndOwner := strings.SplitN(schemaAndOwner, ":", 2)
+ if len(parsedSchemaAndOwner) != 2 {
+ return nil, fmt.Errorf("znode_owner expected format is 'schema:owner'")
+ } else {
+ schema = parsedSchemaAndOwner[0]
+ owner = parsedSchemaAndOwner[1]
+
+ // znode_owner is in config and structured correctly - but does it make any sense?
+ // Either 'owner' or 'schema' was set but not both - this seems like a failed attempt
+ // (e.g. ':MyUser', which omits the schema, or ':', which omits both)
+ if owner == "" || schema == "" {
+ return nil, fmt.Errorf("znode_owner expected format is 'schema:auth'")
+ }
+ }
+ }
+
+ acl := []zk.ACL{{zk.PermAll, schema, owner}}
+
+ // Authentication info
+ var schemaAndUser string
+ var useAddAuth bool
+ schemaAndUser, useAddAuth = conf["auth_info"]
+ if useAddAuth {
+ parsedSchemaAndUser := strings.SplitN(schemaAndUser, ":", 2)
+ if len(parsedSchemaAndUser) != 2 {
+ return nil, fmt.Errorf("auth_info expected format is 'schema:auth'")
+ } else {
+ schema = parsedSchemaAndUser[0]
+ owner = parsedSchemaAndUser[1]
+
+ // auth_info is in config and structured correctly - but does it make any sense?
+ // Either 'owner' or 'schema' was set but not both - this seems like a failed attempt
+ // (e.g. ':MyUser', which omits the schema, or ':', which omits both)
+ if owner == "" || schema == "" {
+ return nil, fmt.Errorf("auth_info expected format is 'schema:auth'")
+ }
+ }
+ }
+
+ // We have all of the configuration in hand - let's try and connect to ZK
+ client, _, err := zk.Connect(strings.Split(machines, ","), time.Second)
+ if err != nil {
+ return nil, fmt.Errorf("client setup failed: %v", err)
+ }
+
+ // Call the ZK AddAuth API if the user asked for it
+ if useAddAuth {
+ err = client.AddAuth(schema, []byte(owner))
+ if err != nil {
+ return nil, fmt.Errorf("Zookeeper rejected authentication information provided at auth_info: %v", err)
+ }
+ }
+
+ // Setup the backend
+ c := &ZookeeperBackend{
+ path: path,
+ client: client,
+ acl: acl,
+ logger: logger,
+ }
+ return c, nil
+}
+
+// ensurePath is used to create each node in the path hierarchy.
+// We avoid calling this optimistically, and invoke it when we get
+// an error during an operation
+func (c *ZookeeperBackend) ensurePath(path string, value []byte) error {
+ nodes := strings.Split(path, "/")
+ fullPath := ""
+ for index, node := range nodes {
+ if strings.TrimSpace(node) != "" {
+ fullPath += "/" + node
+ isLastNode := index+1 == len(nodes)
+
+ // set parent nodes to nil, leaf to value
+ // this block reduces round trips by being smart on the leaf create/set
+ if exists, _, _ := c.client.Exists(fullPath); !isLastNode && !exists {
+ if _, err := c.client.Create(fullPath, nil, int32(0), c.acl); err != nil {
+ return err
+ }
+ } else if isLastNode && !exists {
+ if _, err := c.client.Create(fullPath, value, int32(0), c.acl); err != nil {
+ return err
+ }
+ } else if isLastNode && exists {
+ if _, err := c.client.Set(fullPath, value, int32(-1)); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// cleanupLogicalPath is used to remove all empty nodes, beginning with the
+// deepest one and aborting on the first non-empty one, up to the top-level node.
+func (c *ZookeeperBackend) cleanupLogicalPath(path string) error {
+ nodes := strings.Split(path, "/")
+ for i := len(nodes) - 1; i > 0; i-- {
+ fullPath := c.path + strings.Join(nodes[:i], "/")
+
+ _, stat, err := c.client.Exists(fullPath)
+ if err != nil {
+ return fmt.Errorf("Failed to acquire node data: %s", err)
+ }
+
+ if stat.DataLength > 0 && stat.NumChildren > 0 {
+ msgFmt := "Node %s is both of data and leaf type ??"
+ panic(fmt.Sprintf(msgFmt, fullPath))
+ } else if stat.DataLength > 0 {
+ msgFmt := "Node %s is a data node, this is either a bug or " +
+ "backend data is corrupted"
+ panic(fmt.Sprintf(msgFmt, fullPath))
+ } else if stat.NumChildren > 0 {
+ return nil
+ } else {
+ // Empty node, let's clean it up!
+ if err := c.client.Delete(fullPath, -1); err != nil && err != zk.ErrNoNode {
+ msgFmt := "Removal of node `%s` failed: `%v`"
+ return fmt.Errorf(msgFmt, fullPath, err)
+ }
+ }
+ }
+ return nil
+}
+
+// nodePath returns a zk path based on the given key.
+func (c *ZookeeperBackend) nodePath(key string) string {
+ return filepath.Join(c.path, filepath.Dir(key), ZKNodeFilePrefix+filepath.Base(key))
+}
+
+// Put is used to insert or update an entry
+func (c *ZookeeperBackend) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"zookeeper", "put"}, time.Now())
+
+ // Attempt to set the full path
+ fullPath := c.nodePath(entry.Key)
+ _, err := c.client.Set(fullPath, entry.Value, -1)
+
+ // If we get ErrNoNode, we need to construct the path hierarchy
+ if err == zk.ErrNoNode {
+ return c.ensurePath(fullPath, entry.Value)
+ }
+ return err
+}
+
+// Get is used to fetch an entry
+func (c *ZookeeperBackend) Get(key string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"zookeeper", "get"}, time.Now())
+
+ // Attempt to read the full path
+ fullPath := c.nodePath(key)
+ value, _, err := c.client.Get(fullPath)
+
+ // Ignore if the node does not exist
+ if err == zk.ErrNoNode {
+ err = nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Handle a non-existing value
+ if value == nil {
+ return nil, nil
+ }
+ ent := &Entry{
+ Key: key,
+ Value: value,
+ }
+ return ent, nil
+}
+
+// Delete is used to permanently delete an entry
+func (c *ZookeeperBackend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"zookeeper", "delete"}, time.Now())
+
+ if key == "" {
+ return nil
+ }
+
+ // Delete the full path
+ fullPath := c.nodePath(key)
+ err := c.client.Delete(fullPath, -1)
+
+ // Mask if the node does not exist
+ if err != nil && err != zk.ErrNoNode {
+ return fmt.Errorf("Failed to remove %q: %v", fullPath, err)
+ }
+
+ err = c.cleanupLogicalPath(key)
+
+ return err
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (c *ZookeeperBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"zookeeper", "list"}, time.Now())
+
+ // Query the children at the full path
+ fullPath := strings.TrimSuffix(c.path+prefix, "/")
+ result, _, err := c.client.Children(fullPath)
+
+ // If the path nodes are missing, no children!
+ if err == zk.ErrNoNode {
+ return []string{}, nil
+ } else if err != nil {
+ return []string{}, err
+ }
+
+ children := []string{}
+ for _, key := range result {
+ childPath := fullPath + "/" + key
+ _, stat, err := c.client.Exists(childPath)
+ if err != nil {
+ // The node ought to exist, so the error must be something else
+ return []string{}, err
+ }
+
+ // Check whether this entry is a leaf (data node) or a parent
+ // node, and append the trailing slash which is what Vault
+ // depends on for iteration
+ if stat.DataLength > 0 && stat.NumChildren > 0 {
+ msgFmt := "Node %q is both of data and leaf type ??"
+ panic(fmt.Sprintf(msgFmt, childPath))
+ } else if stat.DataLength == 0 {
+ // We cannot differentiate here based on the number of children, as
+ // a node can have all of its leaves removed and still remain a node.
+ children = append(children, key+"/")
+ } else {
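+ // Leaf (data) node: strip the ZKNodeFilePrefix ("_") from the key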
+ children = append(children, key[1:])
+ }
+ }
+ sort.Strings(children)
+ return children, nil
+}
+
+// LockWith is used for mutual exclusion based on the given key.
+func (c *ZookeeperBackend) LockWith(key, value string) (Lock, error) {
+ l := &ZookeeperHALock{
+ in: c,
+ key: key,
+ value: value,
+ }
+ return l, nil
+}
+
+// HAEnabled indicates whether the HA functionality should be exposed.
+// Currently always returns true.
+func (c *ZookeeperBackend) HAEnabled() bool {
+ return true
+}
+
+// ZookeeperHALock is a Zookeeper Lock implementation for the HABackend
+type ZookeeperHALock struct {
+ in *ZookeeperBackend
+ key string
+ value string
+
+ held bool
+ localLock sync.Mutex
+ leaderCh chan struct{}
+ zkLock *zk.Lock
+}
+
+func (i *ZookeeperHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+ i.localLock.Lock()
+ defer i.localLock.Unlock()
+ if i.held {
+ return nil, fmt.Errorf("lock already held")
+ }
+
+ // Attempt an async acquisition
+ didLock := make(chan struct{})
+ failLock := make(chan error, 1)
+ releaseCh := make(chan bool, 1)
+ lockpath := i.in.nodePath(i.key)
+ go i.attemptLock(lockpath, didLock, failLock, releaseCh)
+
+ // Wait for lock acquisition, failure, or shutdown
+ select {
+ case <-didLock:
+ releaseCh <- false
+ case err := <-failLock:
+ return nil, err
+ case <-stopCh:
+ releaseCh <- true
+ return nil, nil
+ }
+
+ // Create the leader channel
+ i.held = true
+ i.leaderCh = make(chan struct{})
+
+ // Watch for events which could result in the loss of our zkLock; on loss, close(i.leaderCh)
+ currentVal, _, lockeventCh, err := i.in.client.GetW(lockpath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to watch HA lock: %v", err)
+ }
+ if i.value != string(currentVal) {
+ return nil, fmt.Errorf("lost HA lock immediately before watch")
+ }
+ go i.monitorLock(lockeventCh, i.leaderCh)
+
+ return i.leaderCh, nil
+}
+
+func (i *ZookeeperHALock) attemptLock(lockpath string, didLock chan struct{}, failLock chan error, releaseCh chan bool) {
+ // Wait to acquire the lock in ZK
+ lock := zk.NewLock(i.in.client, lockpath, i.in.acl)
+ err := lock.Lock()
+ if err != nil {
+ failLock <- err
+ return
+ }
+ // Set node value
+ data := []byte(i.value)
+ err = i.in.ensurePath(lockpath, data)
+ if err != nil {
+ failLock <- err
+ lock.Unlock()
+ return
+ }
+ i.zkLock = lock
+
+ // Signal that lock is held
+ close(didLock)
+
+ // Handle an early abort
+ release := <-releaseCh
+ if release {
+ lock.Unlock()
+ }
+}
+
+func (i *ZookeeperHALock) monitorLock(lockeventCh <-chan zk.Event, leaderCh chan struct{}) {
+ for {
+ select {
+ case event := <-lockeventCh:
+ // Lost connection?
+ switch event.State {
+ case zk.StateConnected:
+ case zk.StateHasSession:
+ default:
+ close(leaderCh)
+ return
+ }
+
+ // Lost lock?
+ switch event.Type {
+ case zk.EventNodeChildrenChanged:
+ case zk.EventSession:
+ default:
+ close(leaderCh)
+ return
+ }
+ }
+ }
+}
+
+func (i *ZookeeperHALock) Unlock() error {
+ i.localLock.Lock()
+ defer i.localLock.Unlock()
+ if !i.held {
+ return nil
+ }
+
+ i.held = false
+ i.zkLock.Unlock()
+ return nil
+}
+
+func (i *ZookeeperHALock) Value() (bool, string, error) {
+ lockpath := i.in.nodePath(i.key)
+ value, _, err := i.in.client.Get(lockpath)
+ return (value != nil), string(value), err
+}
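+
+// Illustrative sketch only (hypothetical, not part of the upstream change):
+// the configuration keys newZookeeperBackend understands. The ensemble
+// addresses and ACL credentials below are placeholders.
+func exampleZookeeperConfig(logger log.Logger) (Backend, error) {
+ conf := map[string]string{
+ "address": "zk1:2181,zk2:2181,zk3:2181", // comma-separated ensemble
+ "path": "vault/", // key prefix; leading/trailing "/" added if missing
+ "znode_owner": "digest:user:hashedpw", // ACL on created znodes ("schema:owner")
+ "auth_info": "digest:user:password", // passed to AddAuth ("schema:auth")
+ }
+ return newZookeeperBackend(conf, logger)
+}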
diff --git a/vendor/github.com/hashicorp/vault/physical/zookeeper_test.go b/vendor/github.com/hashicorp/vault/physical/zookeeper_test.go
new file mode 100644
index 0000000..b9969ae
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/zookeeper_test.go
@@ -0,0 +1,101 @@
+package physical
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/samuel/go-zookeeper/zk"
+)
+
+func TestZookeeperBackend(t *testing.T) {
+ addr := os.Getenv("ZOOKEEPER_ADDR")
+ if addr == "" {
+ t.SkipNow()
+ }
+
+ client, _, err := zk.Connect([]string{addr}, time.Second)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ randPath := fmt.Sprintf("/vault-%d", time.Now().Unix())
+ acl := zk.WorldACL(zk.PermAll)
+ _, err = client.Create(randPath, []byte("hi"), int32(0), acl)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ defer func() {
+ client.Delete(randPath+"/foo/nested1/nested2/nested3", -1)
+ client.Delete(randPath+"/foo/nested1/nested2", -1)
+ client.Delete(randPath+"/foo/nested1", -1)
+ client.Delete(randPath+"/foo/bar/baz", -1)
+ client.Delete(randPath+"/foo/bar", -1)
+ client.Delete(randPath+"/foo", -1)
+ client.Delete(randPath, -1)
+ client.Close()
+ }()
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("zookeeper", logger, map[string]string{
+ "address": addr + "," + addr,
+ "path": randPath,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ testBackend(t, b)
+ testBackend_ListPrefix(t, b)
+}
+
+func TestZookeeperHABackend(t *testing.T) {
+ addr := os.Getenv("ZOOKEEPER_ADDR")
+ if addr == "" {
+ t.SkipNow()
+ }
+
+ client, _, err := zk.Connect([]string{addr}, time.Second)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ randPath := fmt.Sprintf("/vault-ha-%d", time.Now().Unix())
+ acl := zk.WorldACL(zk.PermAll)
+ _, err = client.Create(randPath, []byte("hi"), int32(0), acl)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ defer func() {
+ client.Delete(randPath+"/foo", -1)
+ client.Delete(randPath, -1)
+ client.Close()
+ }()
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewBackend("zookeeper", logger, map[string]string{
+ "address": addr + "," + addr,
+ "path": randPath,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ ha, ok := b.(HABackend)
+ if !ok {
+ t.Fatalf("zookeeper does not implement HABackend")
+ }
+ testHABackend(t, ha, ha)
+}
diff --git a/vendor/github.com/hashicorp/vault/scripts/build.sh b/vendor/github.com/hashicorp/vault/scripts/build.sh
new file mode 100755
index 0000000..6a1cb51
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/scripts/build.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+#
+# This script builds the application from source for multiple platforms.
+set -e
+
+# Get the parent directory of where this script is.
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
+DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
+
+# Change into that directory
+cd "$DIR"
+
+# Set build tags
+BUILD_TAGS="${BUILD_TAGS:-"vault"}"
+
+# Get the git commit
+GIT_COMMIT="$(git rev-parse HEAD)"
+GIT_DIRTY="$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)"
+
+# Determine the arch/os combos we're building for
+XC_ARCH=${XC_ARCH:-"386 amd64"}
+XC_OS=${XC_OS:-linux darwin windows freebsd openbsd netbsd solaris}
+XC_OSARCH=${XC_OSARCH:-"linux/386 linux/amd64 linux/arm linux/arm64 darwin/386 darwin/amd64 windows/386 windows/amd64 freebsd/386 freebsd/amd64 freebsd/arm openbsd/386 openbsd/amd64 openbsd/arm netbsd/386 netbsd/amd64 netbsd/arm solaris/amd64"}
+
+GOPATH=${GOPATH:-$(go env GOPATH)}
+case $(uname) in
+ CYGWIN*)
+ GOPATH="$(cygpath $GOPATH)"
+ ;;
+esac
+
+# Delete the old dir
+echo "==> Removing old directory..."
+rm -f bin/*
+rm -rf pkg/*
+mkdir -p bin/
+
+# If it's dev mode, only build for ourselves
+if [ "${VAULT_DEV_BUILD}x" != "x" ]; then
+ XC_OS=$(go env GOOS)
+ XC_ARCH=$(go env GOARCH)
+ XC_OSARCH=$(go env GOOS)/$(go env GOARCH)
+fi
+
+# Build!
+echo "==> Building..."
+gox \
+ -osarch="${XC_OSARCH}" \
+ -ldflags "-X github.com/hashicorp/vault/version.GitCommit='${GIT_COMMIT}${GIT_DIRTY}'" \
+ -output "pkg/{{.OS}}_{{.Arch}}/vault" \
+ -tags="${BUILD_TAGS}" \
+ .
+
+# Move all the compiled things to the $GOPATH/bin
+OLDIFS=$IFS
+IFS=: MAIN_GOPATH=($GOPATH)
+IFS=$OLDIFS
+
+# Copy our OS/Arch to the bin/ directory
+DEV_PLATFORM="./pkg/$(go env GOOS)_$(go env GOARCH)"
+for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f); do
+ cp ${F} bin/
+ cp ${F} ${MAIN_GOPATH}/bin/
+done
+
+if [ "${VAULT_DEV_BUILD}x" = "x" ]; then
+ # Zip and copy to the dist dir
+ echo "==> Packaging..."
+ for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do
+ OSARCH=$(basename ${PLATFORM})
+ echo "--> ${OSARCH}"
+
+ pushd $PLATFORM >/dev/null 2>&1
+ zip ../${OSARCH}.zip ./*
+ popd >/dev/null 2>&1
+ done
+fi
+
+# Done!
+echo
+echo "==> Results:"
+ls -hl bin/
diff --git a/vendor/github.com/hashicorp/vault/scripts/coverage.sh b/vendor/github.com/hashicorp/vault/scripts/coverage.sh
new file mode 100755
index 0000000..ad80496
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/scripts/coverage.sh
@@ -0,0 +1,52 @@
+#!/bin/sh
+# Generate test coverage statistics for Go packages.
+#
+# Works around the fact that `go test -coverprofile` currently does not work
+# with multiple packages, see https://code.google.com/p/go/issues/detail?id=6909
+#
+# Usage: script/coverage [--html|--coveralls]
+#
+# --html Additionally create HTML report and open it in browser
+# --coveralls Push coverage statistics to coveralls.io
+#
+
+set -e
+
+workdir=.cover
+profile="$workdir/cover.out"
+mode=count
+
+generate_cover_data() {
+ rm -rf "$workdir"
+ mkdir "$workdir"
+
+ for pkg in "$@"; do
+ f="$workdir/$(echo $pkg | tr / -).cover"
+ go test -covermode="$mode" -coverprofile="$f" "$pkg"
+ done
+
+ echo "mode: $mode" >"$profile"
+ grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
+}
+
+show_cover_report() {
+ go tool cover -${1}="$profile"
+}
+
+push_to_coveralls() {
+ echo "Pushing coverage statistics to coveralls.io"
+ goveralls -coverprofile="$profile"
+}
+
+generate_cover_data $(go list ./... | grep -v /vendor/)
+show_cover_report func
+case "$1" in
+"")
+ ;;
+--html)
+ show_cover_report html ;;
+--coveralls)
+ push_to_coveralls ;;
+*)
+ echo >&2 "error: invalid option: $1"; exit 1 ;;
+esac
diff --git a/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile b/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile
new file mode 100644
index 0000000..7126b63
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile
@@ -0,0 +1,26 @@
+# Adapted from tcnksm/dockerfile-gox -- thanks!
+
+FROM debian:jessie
+
+RUN apt-get update -y && apt-get install --no-install-recommends -y -q \
+ curl \
+ zip \
+ build-essential \
+ ca-certificates \
+ git mercurial bzr \
+ && rm -rf /var/lib/apt/lists/*
+
+ENV GOVERSION 1.8.1
+RUN mkdir /goroot && mkdir /gopath
+RUN curl https://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz \
+ | tar xvzf - -C /goroot --strip-components=1
+
+ENV GOPATH /gopath
+ENV GOROOT /goroot
+ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH
+
+RUN go get github.com/mitchellh/gox
+
+RUN mkdir -p /gopath/src/github.com/hashicorp/vault
+WORKDIR /gopath/src/github.com/hashicorp/vault
+CMD make bin
diff --git a/vendor/github.com/hashicorp/vault/scripts/dist.sh b/vendor/github.com/hashicorp/vault/scripts/dist.sh
new file mode 100755
index 0000000..1e1b2f7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/scripts/dist.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+set -e
+
+# Get the version from the command line
+VERSION=$1
+if [ -z $VERSION ]; then
+ echo "Please specify a version."
+ exit 1
+fi
+
+# Make sure we have AWS API keys
+if ([ -z $AWS_ACCESS_KEY_ID ] || [ -z $AWS_SECRET_ACCESS_KEY ]) && [ ! -z $HC_RELEASE ]; then
+ echo "Please set your AWS access key information in the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env vars."
+ exit 1
+fi
+
+if [ -z $NOBUILD ] && [ -z $DOCKER_CROSS_IMAGE ]; then
+ echo "Please set the Docker cross-compile image in DOCKER_CROSS_IMAGE"
+ exit 1
+fi
+
+# Get the parent directory of where this script is.
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
+DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
+
+# Change into that dir because we expect that
+cd $DIR
+
+if [ -z $RELBRANCH ]; then
+ RELBRANCH=master
+fi
+
+# Tag, unless told not to
+if [ -z $NOTAG ]; then
+ echo "==> Tagging..."
+ git commit --allow-empty --gpg-sign=348FFC4C -m "Cut version $VERSION"
+ git tag -a -m "Version $VERSION" -s -u 348FFC4C "v${VERSION}" $RELBRANCH
+fi
+
+# Build the packages
+if [ -z $NOBUILD ]; then
+ # This should be a local build of the Dockerfile in the cross dir
+ docker run --rm -v "$(pwd)":/gopath/src/github.com/hashicorp/vault -w /gopath/src/github.com/hashicorp/vault ${DOCKER_CROSS_IMAGE}
+fi
+
+# Zip all the files
+rm -rf ./pkg/dist
+mkdir -p ./pkg/dist
+for FILENAME in $(find ./pkg -mindepth 1 -maxdepth 1 -type f); do
+ FILENAME=$(basename $FILENAME)
+ cp ./pkg/${FILENAME} ./pkg/dist/vault_${VERSION}_${FILENAME}
+done
+
+if [ -z $NOSIGN ]; then
+ echo "==> Signing..."
+ pushd ./pkg/dist
+ rm -f ./vault_${VERSION}_SHA256SUMS*
+ shasum -a256 * > ./vault_${VERSION}_SHA256SUMS
+ gpg --default-key 348FFC4C --detach-sig ./vault_${VERSION}_SHA256SUMS
+ popd
+fi
+
+# Upload
+if [ ! -z $HC_RELEASE ]; then
+ hc-releases upload $DIR/pkg/dist
+ hc-releases publish
+
+ curl -X PURGE https://releases.hashicorp.com/vault/${VERSION}
+ for FILENAME in $(find $DIR/pkg/dist -type f); do
+ FILENAME=$(basename $FILENAME)
+ curl -X PURGE https://releases.hashicorp.com/vault/${VERSION}/${FILENAME}
+ done
+fi
+
+exit 0
diff --git a/vendor/github.com/hashicorp/vault/scripts/update_deps.sh b/vendor/github.com/hashicorp/vault/scripts/update_deps.sh
new file mode 100755
index 0000000..80241fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/scripts/update_deps.sh
@@ -0,0 +1,35 @@
+#!/bin/sh
+
+set -e
+
+TOOL=vault
+
+## Make a temp dir
+tempdir=$(mktemp -d update-${TOOL}-deps.XXXXXX)
+
+## Set paths
+export GOPATH="$(pwd)/${tempdir}"
+export PATH="${GOPATH}/bin:${PATH}"
+cd $tempdir
+
+## Get Vault
+mkdir -p src/github.com/hashicorp
+cd src/github.com/hashicorp
+echo "Fetching ${TOOL}..."
+git clone https://github.com/hashicorp/${TOOL}
+cd ${TOOL}
+
+## Clean out earlier vendoring
+rm -rf Godeps vendor
+
+## Get govendor
+go get github.com/kardianos/govendor
+
+## Init
+govendor init
+
+## Fetch deps
+echo "Fetching deps, will take some time..."
+govendor fetch +missing
+
+echo "Done; to commit run \n\ncd ${GOPATH}/src/github.com/hashicorp/${TOOL}\n"
diff --git a/vendor/github.com/hashicorp/vault/scripts/windows/build.bat b/vendor/github.com/hashicorp/vault/scripts/windows/build.bat
new file mode 100644
index 0000000..feace8f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/scripts/windows/build.bat
@@ -0,0 +1,101 @@
+@echo off
+setlocal
+
+set _EXITCODE=0
+set _DEV_BUILD=0
+
+if not exist %1 exit /b 1
+if x%2 == xVAULT_DEV set _DEV_BUILD=1
+
+cd %1
+md bin 2>nul
+
+:: Get the git commit
+set _GIT_COMMIT_FILE=%TEMP%\vault-git_commit.txt
+set _GIT_DIRTY_FILE=%TEMP%\vault-git_dirty.txt
+
+set _NUL_CMP_FILE=%TEMP%\vault-nul_cmp.txt
+type nul >%_NUL_CMP_FILE%
+
+git rev-parse HEAD >"%_GIT_COMMIT_FILE%"
+set /p _GIT_COMMIT=<"%_GIT_COMMIT_FILE%"
+del /f "%_GIT_COMMIT_FILE%" 2>nul
+
+set _GIT_DIRTY=
+git status --porcelain >"%_GIT_DIRTY_FILE%"
+fc "%_GIT_DIRTY_FILE%" "%_NUL_CMP_FILE%" >nul
+if errorlevel 1 set _GIT_DIRTY=+CHANGES
+del /f "%_GIT_DIRTY_FILE%" 2>nul
+del /f "%_NUL_CMP_FILE%" 2>nul
+
+REM Determine the arch/os combos we're building for
+set _XC_ARCH=386 amd64 arm
+set _XC_OS=linux darwin windows freebsd openbsd
+
+REM Install dependencies
+echo ==^> Installing dependencies...
+go get ./...
+
+REM Clean up the old binaries and packages.
+echo ==^> Cleaning old builds...
+rd /s /q bin pkg 2>nul
+md bin 2>nul
+
+REM If it's dev mode, only build for ourselves
+if not %_DEV_BUILD% equ 1 goto build
+
+:devbuild
+echo ==^> Preparing for development build...
+set _GO_ENV_TMP_FILE=%TEMP%\vault-go-env.txt
+go env GOARCH >"%_GO_ENV_TMP_FILE%"
+set /p _XC_ARCH=<"%_GO_ENV_TMP_FILE%"
+del /f "%_GO_ENV_TMP_FILE%" 2>nul
+go env GOOS >"%_GO_ENV_TMP_FILE%"
+set /p _XC_OS=<"%_GO_ENV_TMP_FILE%"
+del /f "%_GO_ENV_TMP_FILE%" 2>nul
+
+:build
+REM Build!
+echo ==^> Building...
+gox^
+ -os="%_XC_OS%"^
+ -arch="%_XC_ARCH%"^
+ -ldflags "-X github.com/hashicorp/vault/version.GitCommit %_GIT_COMMIT%%_GIT_DIRTY%"^
+ -output "pkg/{{.OS}}_{{.Arch}}/vault"^
+ .
+
+if %ERRORLEVEL% equ 1 set _EXITCODE=1
+
+if %_EXITCODE% equ 1 exit /b %_EXITCODE%
+
+set _GO_ENV_TMP_FILE=%TEMP%\vault-go-env.txt
+
+go env GOPATH >"%_GO_ENV_TMP_FILE%"
+set /p _GOPATH=<"%_GO_ENV_TMP_FILE%"
+del /f "%_GO_ENV_TMP_FILE%" 2>nul
+
+go env GOARCH >"%_GO_ENV_TMP_FILE%"
+set /p _GOARCH=<"%_GO_ENV_TMP_FILE%"
+del /f "%_GO_ENV_TMP_FILE%" 2>nul
+
+go env GOOS >"%_GO_ENV_TMP_FILE%"
+set /p _GOOS=<"%_GO_ENV_TMP_FILE%"
+del /f "%_GO_ENV_TMP_FILE%" 2>nul
+
+REM Copy our OS/Arch to the bin/ directory
+set _DEV_PLATFORM=pkg\%_GOOS%_%_GOARCH%
+
+for /r %%f in (%_DEV_PLATFORM%) do (
+ copy /b /y %%f bin\ >nul
+ copy /b /y %%f %_GOPATH%\bin\ >nul
+)
+
+REM TODO(ceh): package dist
+
+REM Done!
+echo.
+echo ==^> Results:
+echo.
+for %%A in ("bin\*") do echo %%~fA
+
+exit /b %_EXITCODE%
diff --git a/vendor/github.com/hashicorp/vault/shamir/shamir.go b/vendor/github.com/hashicorp/vault/shamir/shamir.go
new file mode 100644
index 0000000..d6f5137
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/shamir/shamir.go
@@ -0,0 +1,260 @@
+package shamir
+
+import (
+ "crypto/rand"
+ "crypto/subtle"
+ "fmt"
+ mathrand "math/rand"
+ "time"
+)
+
+const (
+ // ShareOverhead is the byte size overhead of each share
+ // when using Split on a secret. This is caused by appending
+ // a one byte tag to the share.
+ ShareOverhead = 1
+)
+
+// polynomial represents a polynomial of arbitrary degree
+type polynomial struct {
+ coefficients []uint8
+}
+
+// makePolynomial constructs a random polynomial of the given
+// degree but with the provided intercept value.
+func makePolynomial(intercept, degree uint8) (polynomial, error) {
+ // Create a wrapper
+ p := polynomial{
+ coefficients: make([]byte, degree+1),
+ }
+
+ // Ensure the intercept is set
+ p.coefficients[0] = intercept
+
+ // Assign random coefficients to the polynomial
+ if _, err := rand.Read(p.coefficients[1:]); err != nil {
+ return p, err
+ }
+
+ return p, nil
+}
+
+// evaluate returns the value of the polynomial for the given x
+func (p *polynomial) evaluate(x uint8) uint8 {
+ // Special case the origin
+ if x == 0 {
+ return p.coefficients[0]
+ }
+
+ // Compute the polynomial value using Horner's method.
+ degree := len(p.coefficients) - 1
+ out := p.coefficients[degree]
+ for i := degree - 1; i >= 0; i-- {
+ coeff := p.coefficients[i]
+ out = add(mult(out, x), coeff)
+ }
+ return out
+}
+
+// interpolatePolynomial takes N sample points and returns
+// the value at a given x using a lagrange interpolation.
+func interpolatePolynomial(x_samples, y_samples []uint8, x uint8) uint8 {
+ limit := len(x_samples)
+ var result, basis uint8
+ for i := 0; i < limit; i++ {
+ basis = 1
+ for j := 0; j < limit; j++ {
+ if i == j {
+ continue
+ }
+ num := add(x, x_samples[j])
+ denom := add(x_samples[i], x_samples[j])
+ term := div(num, denom)
+ basis = mult(basis, term)
+ }
+ group := mult(y_samples[i], basis)
+ result = add(result, group)
+ }
+ return result
+}
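+
+// In conventional notation, the loop above is Lagrange interpolation
+// evaluated at x over GF(2^8), where subtraction coincides with addition
+// (both are XOR):
+//
+//   L(x) = sum_i y_i * prod_{j != i} (x + x_j) / (x_i + x_j)
+//
+// with the sum, product, and division all taken in the field.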
+
+// div divides two numbers in GF(2^8)
+func div(a, b uint8) uint8 {
+ if b == 0 {
+ // leaks some timing information but we don't care anyway as this
+ // should never happen, hence the panic
+ panic("divide by zero")
+ }
+
+ var goodVal, zero uint8
+ log_a := logTable[a]
+ log_b := logTable[b]
+ diff := (int(log_a) - int(log_b)) % 255
+ if diff < 0 {
+ diff += 255
+ }
+
+ ret := expTable[diff]
+
+ // Ensure we return zero if a is zero, without being subject to timing attacks
+ goodVal = ret
+
+ if subtle.ConstantTimeByteEq(a, 0) == 1 {
+ ret = zero
+ } else {
+ ret = goodVal
+ }
+
+ return ret
+}
+
+// mult multiplies two numbers in GF(2^8)
+func mult(a, b uint8) (out uint8) {
+ var goodVal, zero uint8
+ log_a := logTable[a]
+ log_b := logTable[b]
+ sum := (int(log_a) + int(log_b)) % 255
+
+ ret := expTable[sum]
+
+ // Ensure we return zero if either a or b is zero, without being subject
+ // to timing attacks
+ goodVal = ret
+
+ if subtle.ConstantTimeByteEq(a, 0) == 1 {
+ ret = zero
+ } else {
+ ret = goodVal
+ }
+
+ if subtle.ConstantTimeByteEq(b, 0) == 1 {
+ ret = zero
+ } else {
+ // This operation does not do anything logically useful. It
+ // only ensures a constant number of assignments to thwart
+ // timing attacks.
+ goodVal = zero
+ }
+
+ return ret
+}
+
+// add combines two numbers in GF(2^8)
+// This can also be used for subtraction since it is symmetric.
+func add(a, b uint8) uint8 {
+ return a ^ b
+}
+
+// Split takes an arbitrarily long secret and generates a `parts`
+// number of shares, `threshold` of which are required to reconstruct
+// the secret. The parts and threshold must be at least 2, and less
+// than 256. The returned shares are each one byte longer than the secret
+// as they attach a tag used to reconstruct the secret.
+func Split(secret []byte, parts, threshold int) ([][]byte, error) {
+ // Sanity check the input
+ if parts < threshold {
+ return nil, fmt.Errorf("parts cannot be less than threshold")
+ }
+ if parts > 255 {
+ return nil, fmt.Errorf("parts cannot exceed 255")
+ }
+ if threshold < 2 {
+ return nil, fmt.Errorf("threshold must be at least 2")
+ }
+ if threshold > 255 {
+ return nil, fmt.Errorf("threshold cannot exceed 255")
+ }
+ if len(secret) == 0 {
+ return nil, fmt.Errorf("cannot split an empty secret")
+ }
+
+ // Generate random list of x coordinates
+ mathrand.Seed(time.Now().UnixNano())
+ xCoordinates := mathrand.Perm(255)
+
+ // Allocate the output array, initialize the final byte
+ // of the output with the offset. The representation of each
+ // output is {y1, y2, .., yN, x}.
+ out := make([][]byte, parts)
+ for idx := range out {
+ out[idx] = make([]byte, len(secret)+1)
+ out[idx][len(secret)] = uint8(xCoordinates[idx]) + 1
+ }
+
+ // Construct a random polynomial for each byte of the secret.
+ // Because we are using a field of size 256, we can only represent
+ // a single byte as the intercept of the polynomial, so we must
+ // use a new polynomial for each byte.
+ for idx, val := range secret {
+ p, err := makePolynomial(val, uint8(threshold-1))
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate polynomial: %v", err)
+ }
+
+ // Generate a `parts` number of (x,y) pairs
+ // We cheat by encoding the x value once as the final index,
+ // so that it only needs to be stored once.
+ for i := 0; i < parts; i++ {
+ x := uint8(xCoordinates[i]) + 1
+ y := p.evaluate(x)
+ out[i][idx] = y
+ }
+ }
+
+ // Return the encoded secrets
+ return out, nil
+}
+
+// Combine is used to reverse a Split and reconstruct a secret
+// once a `threshold` number of parts are available.
+func Combine(parts [][]byte) ([]byte, error) {
+ // Verify enough parts provided
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("less than two parts cannot be used to reconstruct the secret")
+ }
+
+ // Verify the parts are all the same length
+ firstPartLen := len(parts[0])
+ if firstPartLen < 2 {
+ return nil, fmt.Errorf("parts must be at least two bytes")
+ }
+ for i := 1; i < len(parts); i++ {
+ if len(parts[i]) != firstPartLen {
+ return nil, fmt.Errorf("all parts must be the same length")
+ }
+ }
+
+ // Create a buffer to store the reconstructed secret
+ secret := make([]byte, firstPartLen-1)
+
+ // Buffer to store the samples
+ x_samples := make([]uint8, len(parts))
+ y_samples := make([]uint8, len(parts))
+
+ // Set the x value for each sample and ensure no x_sample values are the same,
+ // otherwise div() can be unhappy
+ checkMap := map[byte]bool{}
+ for i, part := range parts {
+ samp := part[firstPartLen-1]
+ if exists := checkMap[samp]; exists {
+ return nil, fmt.Errorf("duplicate part detected")
+ }
+ checkMap[samp] = true
+ x_samples[i] = samp
+ }
+
+ // Reconstruct each byte
+ for idx := range secret {
+ // Set the y value for each sample
+ for i, part := range parts {
+ y_samples[i] = part[idx]
+ }
+
+ // Interpolate the polynomial and compute the value at 0
+ val := interpolatePolynomial(x_samples, y_samples, 0)
+
+ // Evaluate the 0th value to get the intercept
+ secret[idx] = val
+ }
+ return secret, nil
+}
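+
+// Illustrative sketch only (hypothetical, not part of the upstream change):
+// the intended round trip. Split a secret into 5 shares with a threshold of
+// 3, then reconstruct it from any 3 of them.
+func exampleSplitCombine() error {
+ secret := []byte("my master key")
+
+ shares, err := Split(secret, 5, 3)
+ if err != nil {
+ return err
+ }
+
+ // Any `threshold` shares suffice; here we use the first three.
+ recovered, err := Combine(shares[:3])
+ if err != nil {
+ return err
+ }
+ if subtle.ConstantTimeCompare(recovered, secret) != 1 {
+ return fmt.Errorf("reconstruction mismatch")
+ }
+ return nil
+}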
diff --git a/vendor/github.com/hashicorp/vault/shamir/shamir_test.go b/vendor/github.com/hashicorp/vault/shamir/shamir_test.go
new file mode 100644
index 0000000..09f90d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/shamir/shamir_test.go
@@ -0,0 +1,198 @@
+package shamir
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestSplit_invalid(t *testing.T) {
+ secret := []byte("test")
+
+ if _, err := Split(secret, 0, 0); err == nil {
+ t.Fatalf("expect error")
+ }
+
+ if _, err := Split(secret, 2, 3); err == nil {
+ t.Fatalf("expect error")
+ }
+
+ if _, err := Split(secret, 1000, 3); err == nil {
+ t.Fatalf("expect error")
+ }
+
+ if _, err := Split(secret, 10, 1); err == nil {
+ t.Fatalf("expect error")
+ }
+
+ if _, err := Split(nil, 3, 2); err == nil {
+ t.Fatalf("expect error")
+ }
+}
+
+func TestSplit(t *testing.T) {
+ secret := []byte("test")
+
+ out, err := Split(secret, 5, 3)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(out) != 5 {
+ t.Fatalf("bad: %v", out)
+ }
+
+ for _, share := range out {
+ if len(share) != len(secret)+1 {
+ t.Fatalf("bad: %v", out)
+ }
+ }
+}
+
+func TestCombine_invalid(t *testing.T) {
+ // Not enough parts
+ if _, err := Combine(nil); err == nil {
+ t.Fatalf("should err")
+ }
+
+ // Mis-match in length
+ parts := [][]byte{
+ []byte("foo"),
+ []byte("ba"),
+ }
+ if _, err := Combine(parts); err == nil {
+ t.Fatalf("should err")
+ }
+
+ // Too short
+ parts = [][]byte{
+ []byte("f"),
+ []byte("b"),
+ }
+ if _, err := Combine(parts); err == nil {
+ t.Fatalf("should err")
+ }
+
+ parts = [][]byte{
+ []byte("foo"),
+ []byte("foo"),
+ }
+ if _, err := Combine(parts); err == nil {
+ t.Fatalf("should err")
+ }
+}
+
+func TestCombine(t *testing.T) {
+ secret := []byte("test")
+
+ out, err := Split(secret, 5, 3)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // There are 5*4*3 possible choices;
+ // we will just brute-force try them all
+ for i := 0; i < 5; i++ {
+ for j := 0; j < 5; j++ {
+ if j == i {
+ continue
+ }
+ for k := 0; k < 5; k++ {
+ if k == i || k == j {
+ continue
+ }
+ parts := [][]byte{out[i], out[j], out[k]}
+ recomb, err := Combine(parts)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if !bytes.Equal(recomb, secret) {
+ t.Errorf("parts: (i:%d, j:%d, k:%d) %v", i, j, k, parts)
+ t.Fatalf("bad: %v %v", recomb, secret)
+ }
+ }
+ }
+ }
+}
+
+func TestField_Add(t *testing.T) {
+ if out := add(16, 16); out != 0 {
+ t.Fatalf("Bad: %v 16", out)
+ }
+
+ if out := add(3, 4); out != 7 {
+ t.Fatalf("Bad: %v 7", out)
+ }
+}
+
+func TestField_Mult(t *testing.T) {
+ if out := mult(3, 7); out != 9 {
+ t.Fatalf("Bad: %v 9", out)
+ }
+
+ if out := mult(3, 0); out != 0 {
+ t.Fatalf("Bad: %v 0", out)
+ }
+
+ if out := mult(0, 3); out != 0 {
+ t.Fatalf("Bad: %v 0", out)
+ }
+}
+
+func TestField_Divide(t *testing.T) {
+ if out := div(0, 7); out != 0 {
+ t.Fatalf("Bad: %v 0", out)
+ }
+
+ if out := div(3, 3); out != 1 {
+ t.Fatalf("Bad: %v 1", out)
+ }
+
+ if out := div(6, 3); out != 2 {
+ t.Fatalf("Bad: %v 2", out)
+ }
+}
+
+func TestPolynomial_Random(t *testing.T) {
+ p, err := makePolynomial(42, 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if p.coefficients[0] != 42 {
+ t.Fatalf("bad: %v", p.coefficients)
+ }
+}
+
+func TestPolynomial_Eval(t *testing.T) {
+ p, err := makePolynomial(42, 1)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if out := p.evaluate(0); out != 42 {
+ t.Fatalf("bad: %v", out)
+ }
+
+ out := p.evaluate(1)
+ exp := add(42, mult(1, p.coefficients[1]))
+ if out != exp {
+ t.Fatalf("bad: %v %v %v", out, exp, p.coefficients)
+ }
+}
+
+func TestInterpolate_Rand(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ p, err := makePolynomial(uint8(i), 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ x_vals := []uint8{1, 2, 3}
+ y_vals := []uint8{p.evaluate(1), p.evaluate(2), p.evaluate(3)}
+ out := interpolatePolynomial(x_vals, y_vals, 0)
+ if out != uint8(i) {
+ t.Fatalf("Bad: %v %d", out, i)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/shamir/tables.go b/vendor/github.com/hashicorp/vault/shamir/tables.go
new file mode 100644
index 0000000..76c245e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/shamir/tables.go
@@ -0,0 +1,77 @@
+package shamir
+
+// Tables taken from http://www.samiam.org/galois.html
+// They use 0xe5 (229) as the generator
+
+var (
+ // logTable provides the log(X)/log(g) at each index X
+ logTable = [256]uint8{
+ 0x00, 0xff, 0xc8, 0x08, 0x91, 0x10, 0xd0, 0x36,
+ 0x5a, 0x3e, 0xd8, 0x43, 0x99, 0x77, 0xfe, 0x18,
+ 0x23, 0x20, 0x07, 0x70, 0xa1, 0x6c, 0x0c, 0x7f,
+ 0x62, 0x8b, 0x40, 0x46, 0xc7, 0x4b, 0xe0, 0x0e,
+ 0xeb, 0x16, 0xe8, 0xad, 0xcf, 0xcd, 0x39, 0x53,
+ 0x6a, 0x27, 0x35, 0x93, 0xd4, 0x4e, 0x48, 0xc3,
+ 0x2b, 0x79, 0x54, 0x28, 0x09, 0x78, 0x0f, 0x21,
+ 0x90, 0x87, 0x14, 0x2a, 0xa9, 0x9c, 0xd6, 0x74,
+ 0xb4, 0x7c, 0xde, 0xed, 0xb1, 0x86, 0x76, 0xa4,
+ 0x98, 0xe2, 0x96, 0x8f, 0x02, 0x32, 0x1c, 0xc1,
+ 0x33, 0xee, 0xef, 0x81, 0xfd, 0x30, 0x5c, 0x13,
+ 0x9d, 0x29, 0x17, 0xc4, 0x11, 0x44, 0x8c, 0x80,
+ 0xf3, 0x73, 0x42, 0x1e, 0x1d, 0xb5, 0xf0, 0x12,
+ 0xd1, 0x5b, 0x41, 0xa2, 0xd7, 0x2c, 0xe9, 0xd5,
+ 0x59, 0xcb, 0x50, 0xa8, 0xdc, 0xfc, 0xf2, 0x56,
+ 0x72, 0xa6, 0x65, 0x2f, 0x9f, 0x9b, 0x3d, 0xba,
+ 0x7d, 0xc2, 0x45, 0x82, 0xa7, 0x57, 0xb6, 0xa3,
+ 0x7a, 0x75, 0x4f, 0xae, 0x3f, 0x37, 0x6d, 0x47,
+ 0x61, 0xbe, 0xab, 0xd3, 0x5f, 0xb0, 0x58, 0xaf,
+ 0xca, 0x5e, 0xfa, 0x85, 0xe4, 0x4d, 0x8a, 0x05,
+ 0xfb, 0x60, 0xb7, 0x7b, 0xb8, 0x26, 0x4a, 0x67,
+ 0xc6, 0x1a, 0xf8, 0x69, 0x25, 0xb3, 0xdb, 0xbd,
+ 0x66, 0xdd, 0xf1, 0xd2, 0xdf, 0x03, 0x8d, 0x34,
+ 0xd9, 0x92, 0x0d, 0x63, 0x55, 0xaa, 0x49, 0xec,
+ 0xbc, 0x95, 0x3c, 0x84, 0x0b, 0xf5, 0xe6, 0xe7,
+ 0xe5, 0xac, 0x7e, 0x6e, 0xb9, 0xf9, 0xda, 0x8e,
+ 0x9a, 0xc9, 0x24, 0xe1, 0x0a, 0x15, 0x6b, 0x3a,
+ 0xa0, 0x51, 0xf4, 0xea, 0xb2, 0x97, 0x9e, 0x5d,
+ 0x22, 0x88, 0x94, 0xce, 0x19, 0x01, 0x71, 0x4c,
+ 0xa5, 0xe3, 0xc5, 0x31, 0xbb, 0xcc, 0x1f, 0x2d,
+ 0x3b, 0x52, 0x6f, 0xf6, 0x2e, 0x89, 0xf7, 0xc0,
+ 0x68, 0x1b, 0x64, 0x04, 0x06, 0xbf, 0x83, 0x38}
+
+ // expTable provides the anti-log or exponentiation value
+ // for the equivalent index
+ expTable = [256]uint8{
+ 0x01, 0xe5, 0x4c, 0xb5, 0xfb, 0x9f, 0xfc, 0x12,
+ 0x03, 0x34, 0xd4, 0xc4, 0x16, 0xba, 0x1f, 0x36,
+ 0x05, 0x5c, 0x67, 0x57, 0x3a, 0xd5, 0x21, 0x5a,
+ 0x0f, 0xe4, 0xa9, 0xf9, 0x4e, 0x64, 0x63, 0xee,
+ 0x11, 0x37, 0xe0, 0x10, 0xd2, 0xac, 0xa5, 0x29,
+ 0x33, 0x59, 0x3b, 0x30, 0x6d, 0xef, 0xf4, 0x7b,
+ 0x55, 0xeb, 0x4d, 0x50, 0xb7, 0x2a, 0x07, 0x8d,
+ 0xff, 0x26, 0xd7, 0xf0, 0xc2, 0x7e, 0x09, 0x8c,
+ 0x1a, 0x6a, 0x62, 0x0b, 0x5d, 0x82, 0x1b, 0x8f,
+ 0x2e, 0xbe, 0xa6, 0x1d, 0xe7, 0x9d, 0x2d, 0x8a,
+ 0x72, 0xd9, 0xf1, 0x27, 0x32, 0xbc, 0x77, 0x85,
+ 0x96, 0x70, 0x08, 0x69, 0x56, 0xdf, 0x99, 0x94,
+ 0xa1, 0x90, 0x18, 0xbb, 0xfa, 0x7a, 0xb0, 0xa7,
+ 0xf8, 0xab, 0x28, 0xd6, 0x15, 0x8e, 0xcb, 0xf2,
+ 0x13, 0xe6, 0x78, 0x61, 0x3f, 0x89, 0x46, 0x0d,
+ 0x35, 0x31, 0x88, 0xa3, 0x41, 0x80, 0xca, 0x17,
+ 0x5f, 0x53, 0x83, 0xfe, 0xc3, 0x9b, 0x45, 0x39,
+ 0xe1, 0xf5, 0x9e, 0x19, 0x5e, 0xb6, 0xcf, 0x4b,
+ 0x38, 0x04, 0xb9, 0x2b, 0xe2, 0xc1, 0x4a, 0xdd,
+ 0x48, 0x0c, 0xd0, 0x7d, 0x3d, 0x58, 0xde, 0x7c,
+ 0xd8, 0x14, 0x6b, 0x87, 0x47, 0xe8, 0x79, 0x84,
+ 0x73, 0x3c, 0xbd, 0x92, 0xc9, 0x23, 0x8b, 0x97,
+ 0x95, 0x44, 0xdc, 0xad, 0x40, 0x65, 0x86, 0xa2,
+ 0xa4, 0xcc, 0x7f, 0xec, 0xc0, 0xaf, 0x91, 0xfd,
+ 0xf7, 0x4f, 0x81, 0x2f, 0x5b, 0xea, 0xa8, 0x1c,
+ 0x02, 0xd1, 0x98, 0x71, 0xed, 0x25, 0xe3, 0x24,
+ 0x06, 0x68, 0xb3, 0x93, 0x2c, 0x6f, 0x3e, 0x6c,
+ 0x0a, 0xb8, 0xce, 0xae, 0x74, 0xb1, 0x42, 0xb4,
+ 0x1e, 0xd3, 0x49, 0xe9, 0x9c, 0xc8, 0xc6, 0xc7,
+ 0x22, 0x6e, 0xdb, 0x20, 0xbf, 0x43, 0x51, 0x52,
+ 0x66, 0xb2, 0x76, 0x60, 0xda, 0xc5, 0xf3, 0xf6,
+ 0xaa, 0xcd, 0x9a, 0xa0, 0x75, 0x54, 0x0e, 0x01}
+)
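+
+// Illustrative sketch only (hypothetical, not part of the upstream change):
+// the tables implement GF(2^8) arithmetic with generator 0xe5, so multiplying
+// two non-zero elements is adding their discrete logs mod 255.
+func exampleTableLookup() bool {
+ a, b := uint8(3), uint8(7)
+ sum := (int(logTable[a]) + int(logTable[b])) % 255
+ return expTable[sum] == mult(a, b) // true; both sides are 9
+}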
diff --git a/vendor/github.com/hashicorp/vault/shamir/tables_test.go b/vendor/github.com/hashicorp/vault/shamir/tables_test.go
new file mode 100644
index 0000000..81aa983
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/shamir/tables_test.go
@@ -0,0 +1,13 @@
+package shamir
+
+import "testing"
+
+func TestTables(t *testing.T) {
+ for i := 1; i < 256; i++ {
+ logV := logTable[i]
+ expV := expTable[logV]
+ if expV != uint8(i) {
+ t.Fatalf("bad: %d log: %d exp: %d", i, logV, expV)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/terraform/aws/README.md b/vendor/github.com/hashicorp/vault/terraform/aws/README.md
new file mode 100644
index 0000000..cd9d8f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/terraform/aws/README.md
@@ -0,0 +1,8 @@
+# Deploy Vault to AWS
+
+This folder contains a Terraform module for deploying Vault to AWS
+(within a VPC). It can be used as-is or can be modified to work in your
+scenario, but should serve as a strong starting point for deploying Vault.
+
+See `variables.tf` for a full reference to the parameters that this module
+takes and their descriptions.
diff --git a/vendor/github.com/hashicorp/vault/terraform/aws/main.tf b/vendor/github.com/hashicorp/vault/terraform/aws/main.tf
new file mode 100644
index 0000000..279b0bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/terraform/aws/main.tf
@@ -0,0 +1,140 @@
+resource "template_file" "install" {
+ template = "${file("${path.module}/scripts/install.sh.tpl")}"
+
+ vars {
+ download_url = "${var.download-url}"
+ config = "${var.config}"
+ extra-install = "${var.extra-install}"
+ }
+}
+
+// We launch Vault into an ASG so that it can properly bring the instances up for us.
+resource "aws_autoscaling_group" "vault" {
+ name = "vault - ${aws_launch_configuration.vault.name}"
+ launch_configuration = "${aws_launch_configuration.vault.name}"
+ availability_zones = ["${split(",", var.availability-zones)}"]
+ min_size = "${var.nodes}"
+ max_size = "${var.nodes}"
+ desired_capacity = "${var.nodes}"
+ health_check_grace_period = 15
+ health_check_type = "EC2"
+ vpc_zone_identifier = ["${split(",", var.subnets)}"]
+ load_balancers = ["${aws_elb.vault.id}"]
+
+ tag {
+ key = "Name"
+ value = "vault"
+ propagate_at_launch = true
+ }
+}
+
+resource "aws_launch_configuration" "vault" {
+ image_id = "${var.ami}"
+ instance_type = "${var.instance_type}"
+ key_name = "${var.key-name}"
+ security_groups = ["${aws_security_group.vault.id}"]
+ user_data = "${template_file.install.rendered}"
+}
+
+// Security group for Vault allows SSH and HTTP access (via "tcp" in
+// case TLS is used)
+resource "aws_security_group" "vault" {
+ name = "vault"
+ description = "Vault servers"
+ vpc_id = "${var.vpc-id}"
+}
+
+resource "aws_security_group_rule" "vault-ssh" {
+ security_group_id = "${aws_security_group.vault.id}"
+ type = "ingress"
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+// This rule allows Vault HTTP API access to individual nodes, since each will
+// need to be addressed individually for unsealing.
+resource "aws_security_group_rule" "vault-http-api" {
+ security_group_id = "${aws_security_group.vault.id}"
+ type = "ingress"
+ from_port = 8200
+ to_port = 8200
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "vault-egress" {
+ security_group_id = "${aws_security_group.vault.id}"
+ type = "egress"
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+// Launch the ELB that is serving Vault. This has proper health checks
+// to only serve healthy, unsealed Vaults.
+resource "aws_elb" "vault" {
+ name = "vault"
+ connection_draining = true
+ connection_draining_timeout = 400
+ internal = true
+ subnets = ["${split(",", var.subnets)}"]
+ security_groups = ["${aws_security_group.elb.id}"]
+
+ listener {
+ instance_port = 8200
+ instance_protocol = "tcp"
+ lb_port = 80
+ lb_protocol = "tcp"
+ }
+
+ listener {
+ instance_port = 8200
+ instance_protocol = "tcp"
+ lb_port = 443
+ lb_protocol = "tcp"
+ }
+
+ health_check {
+ healthy_threshold = 2
+ unhealthy_threshold = 3
+ timeout = 5
+ target = "${var.elb-health-check}"
+ interval = 15
+ }
+}
+
+resource "aws_security_group" "elb" {
+ name = "vault-elb"
+ description = "Vault ELB"
+ vpc_id = "${var.vpc-id}"
+}
+
+resource "aws_security_group_rule" "vault-elb-http" {
+ security_group_id = "${aws_security_group.elb.id}"
+ type = "ingress"
+ from_port = 80
+ to_port = 80
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "vault-elb-https" {
+ security_group_id = "${aws_security_group.elb.id}"
+ type = "ingress"
+ from_port = 443
+ to_port = 443
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "vault-elb-egress" {
+ security_group_id = "${aws_security_group.elb.id}"
+ type = "egress"
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+}
diff --git a/vendor/github.com/hashicorp/vault/terraform/aws/outputs.tf b/vendor/github.com/hashicorp/vault/terraform/aws/outputs.tf
new file mode 100644
index 0000000..392d7af
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/terraform/aws/outputs.tf
@@ -0,0 +1,13 @@
+output "address" {
+ value = "${aws_elb.vault.dns_name}"
+}
+
+// Can be used to add additional SG rules to Vault instances.
+output "vault_security_group" {
+ value = "${aws_security_group.vault.id}"
+}
+
+// Can be used to add additional SG rules to the Vault ELB.
+output "elb_security_group" {
+ value = "${aws_security_group.elb.id}"
+}
diff --git a/vendor/github.com/hashicorp/vault/terraform/aws/scripts/install.sh.tpl b/vendor/github.com/hashicorp/vault/terraform/aws/scripts/install.sh.tpl
new file mode 100644
index 0000000..03296b4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/terraform/aws/scripts/install.sh.tpl
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+set -e
+
+# Install packages
+sudo apt-get update -y
+sudo apt-get install -y curl unzip
+
+# Download Vault into some temporary directory
+curl -L "${download_url}" > /tmp/vault.zip
+
+# Unzip it
+cd /tmp
+sudo unzip vault.zip
+sudo mv vault /usr/local/bin
+sudo chmod 0755 /usr/local/bin/vault
+sudo chown root:root /usr/local/bin/vault
+
+# Setup the configuration
+cat <<EOF >/tmp/vault-config
+${config}
+EOF
+sudo mv /tmp/vault-config /usr/local/etc/vault-config.json
+
+# Setup the init script
+cat <<EOF >/tmp/upstart
+description "Vault server"
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+respawn
+
+script
+ if [ -f "/etc/service/vault" ]; then
+ . /etc/service/vault
+ fi
+
+ # Make sure to use all our CPUs, because Vault can block a scheduler thread
+ export GOMAXPROCS=`nproc`
+
+ exec /usr/local/bin/vault server \
+ -config="/usr/local/etc/vault-config.json" \
+ \$${VAULT_FLAGS} \
+ >>/var/log/vault.log 2>&1
+end script
+EOF
+sudo mv /tmp/upstart /etc/init/vault.conf
+
+# Extra install steps (if any)
+${extra-install}
+
+# Start Vault
+sudo start vault
diff --git a/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf b/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf
new file mode 100644
index 0000000..ba0ee3a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf
@@ -0,0 +1,59 @@
+//-------------------------------------------------------------------
+// Vault settings
+//-------------------------------------------------------------------
+
+variable "download-url" {
+ default = "https://releases.hashicorp.com/vault/0.7.0/vault_0.7.0_linux_amd64.zip"
+ description = "URL to download Vault"
+}
+
+variable "config" {
+ description = "Configuration (text) for Vault"
+}
+
+variable "extra-install" {
+ default = ""
+ description = "Extra commands to run in the install script"
+}
+
+//-------------------------------------------------------------------
+// AWS settings
+//-------------------------------------------------------------------
+
+variable "ami" {
+ default = "ami-7eb2a716"
+ description = "AMI for Vault instances"
+}
+
+variable "availability-zones" {
+ default = "us-east-1a,us-east-1b"
+ description = "Availability zones for launching the Vault instances"
+}
+
+variable "elb-health-check" {
+ default = "HTTP:8200/v1/sys/health"
+ description = "Health check for Vault servers"
+}
+
+variable "instance_type" {
+ default = "m3.medium"
+ description = "Instance type for Vault instances"
+}
+
+variable "key-name" {
+ default = "default"
+ description = "SSH key name for Vault instances"
+}
+
+variable "nodes" {
+ default = "2"
+ description = "number of Vault instances"
+}
+
+variable "subnets" {
+ description = "list of subnets to launch Vault within"
+}
+
+variable "vpc-id" {
+ description = "VPC ID"
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/acl.go b/vendor/github.com/hashicorp/vault/vault/acl.go
new file mode 100644
index 0000000..550e0df
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/acl.go
@@ -0,0 +1,365 @@
+package vault
+
+import (
+ "reflect"
+ "strings"
+
+ "github.com/armon/go-radix"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+// ACL is used to wrap a set of policies to provide
+// an efficient interface for access control.
+type ACL struct {
+ // exactRules contains the path policies that are exact
+ exactRules *radix.Tree
+
+ // globRules contains the path policies that glob
+ globRules *radix.Tree
+
+ // root is enabled if the "root" named policy is present.
+ root bool
+}
+
+// NewACL is used to construct a policy-based ACL from a set of policies.
+func NewACL(policies []*Policy) (*ACL, error) {
+ // Initialize
+ a := &ACL{
+ exactRules: radix.New(),
+ globRules: radix.New(),
+ root: false,
+ }
+
+ // Inject each policy
+ for _, policy := range policies {
+ // Ignore a nil policy object
+ if policy == nil {
+ continue
+ }
+ // Check if this is root
+ if policy.Name == "root" {
+ a.root = true
+ }
+ for _, pc := range policy.Paths {
+ // Check which tree to use
+ tree := a.exactRules
+ if pc.Glob {
+ tree = a.globRules
+ }
+
+ // Check for an existing policy
+ raw, ok := tree.Get(pc.Prefix)
+ if !ok {
+ tree.Insert(pc.Prefix, pc.Permissions)
+ continue
+ }
+
+ // these are the ones already in the tree
+ existingPerms := raw.(*Permissions)
+
+ switch {
+ case existingPerms.CapabilitiesBitmap&DenyCapabilityInt > 0:
+ // If we are explicitly denied in the existing capability set,
+ // don't save anything else
+ continue
+
+ case pc.Permissions.CapabilitiesBitmap&DenyCapabilityInt > 0:
+ // If this new policy explicitly denies, only save the deny value
+ pc.Permissions.CapabilitiesBitmap = DenyCapabilityInt
+ pc.Permissions.AllowedParameters = nil
+ pc.Permissions.DeniedParameters = nil
+ goto INSERT
+
+ default:
+ // Insert the capabilities in this new policy into the existing
+ // value
+ pc.Permissions.CapabilitiesBitmap = existingPerms.CapabilitiesBitmap | pc.Permissions.CapabilitiesBitmap
+ }
+
+ // Note: In these stanzas we prefer minimum lifetimes: we take the
+ // lesser of two specified max values, or the lesser of two specified
+ // min values, the idea being to keep the allowed token lifetime as
+ // small as possible.
+ //
+ // If we have an existing max, and we either don't have a current
+ // max, or the current is greater than the previous, use the
+ // existing.
+ if existingPerms.MaxWrappingTTL > 0 &&
+ (pc.Permissions.MaxWrappingTTL == 0 ||
+ existingPerms.MaxWrappingTTL < pc.Permissions.MaxWrappingTTL) {
+ pc.Permissions.MaxWrappingTTL = existingPerms.MaxWrappingTTL
+ }
+ // If we have an existing min, and we either don't have a current
+ // min, or the current is greater than the previous, use the
+ // existing
+ if existingPerms.MinWrappingTTL > 0 &&
+ (pc.Permissions.MinWrappingTTL == 0 ||
+ existingPerms.MinWrappingTTL < pc.Permissions.MinWrappingTTL) {
+ pc.Permissions.MinWrappingTTL = existingPerms.MinWrappingTTL
+ }
+
+ if len(existingPerms.AllowedParameters) > 0 {
+ if pc.Permissions.AllowedParameters == nil {
+ pc.Permissions.AllowedParameters = existingPerms.AllowedParameters
+ } else {
+ for key, value := range existingPerms.AllowedParameters {
+ pcValue, ok := pc.Permissions.AllowedParameters[key]
+ // If an empty array exists it should overwrite any other
+ // value.
+ if len(value) == 0 || (ok && len(pcValue) == 0) {
+ pc.Permissions.AllowedParameters[key] = []interface{}{}
+ } else {
+ // Merge the two maps, appending values on key conflict.
+ pc.Permissions.AllowedParameters[key] = append(value, pc.Permissions.AllowedParameters[key]...)
+ }
+ }
+ }
+ }
+
+ if len(existingPerms.DeniedParameters) > 0 {
+ if pc.Permissions.DeniedParameters == nil {
+ pc.Permissions.DeniedParameters = existingPerms.DeniedParameters
+ } else {
+ for key, value := range existingPerms.DeniedParameters {
+ pcValue, ok := pc.Permissions.DeniedParameters[key]
+ // If an empty array exists it should overwrite any other
+ // value.
+ if len(value) == 0 || (ok && len(pcValue) == 0) {
+ pc.Permissions.DeniedParameters[key] = []interface{}{}
+ } else {
+ // Merge the two maps, appending values on key conflict.
+ pc.Permissions.DeniedParameters[key] = append(value, pc.Permissions.DeniedParameters[key]...)
+ }
+ }
+ }
+ }
+
+ INSERT:
+
+ tree.Insert(pc.Prefix, pc.Permissions)
+
+ }
+ }
+ return a, nil
+}
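+
+// For illustration, a hedged sketch of the merge semantics above (the
+// values readPolicy and updatePolicy are hypothetical parsed *Policy
+// objects, not part of this package): if one policy grants "read" and
+// the other grants "update" on the same path, the merged bitmap is the
+// OR of both, so the path carries both capabilities:
+//
+//	acl, _ := NewACL([]*Policy{readPolicy, updatePolicy})
+//	acl.Capabilities("secret/app") // would report ["read", "update"]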
+
+func (a *ACL) Capabilities(path string) (pathCapabilities []string) {
+ // Fast-path root
+ if a.root {
+ return []string{RootCapability}
+ }
+
+ // Find an exact matching rule, look for glob if no match
+ var capabilities uint32
+ raw, ok := a.exactRules.Get(path)
+
+ if ok {
+ perm := raw.(*Permissions)
+ capabilities = perm.CapabilitiesBitmap
+ goto CHECK
+ }
+
+ // Find a glob rule, default deny if no match
+ _, raw, ok = a.globRules.LongestPrefix(path)
+ if !ok {
+ return []string{DenyCapability}
+ } else {
+ perm := raw.(*Permissions)
+ capabilities = perm.CapabilitiesBitmap
+ }
+
+CHECK:
+ if capabilities&SudoCapabilityInt > 0 {
+ pathCapabilities = append(pathCapabilities, SudoCapability)
+ }
+ if capabilities&ReadCapabilityInt > 0 {
+ pathCapabilities = append(pathCapabilities, ReadCapability)
+ }
+ if capabilities&ListCapabilityInt > 0 {
+ pathCapabilities = append(pathCapabilities, ListCapability)
+ }
+ if capabilities&UpdateCapabilityInt > 0 {
+ pathCapabilities = append(pathCapabilities, UpdateCapability)
+ }
+ if capabilities&DeleteCapabilityInt > 0 {
+ pathCapabilities = append(pathCapabilities, DeleteCapability)
+ }
+ if capabilities&CreateCapabilityInt > 0 {
+ pathCapabilities = append(pathCapabilities, CreateCapability)
+ }
+
+ // If "deny" is explicitly set or if the path has no capabilities at all,
+ // set the path capabilities to "deny"
+ if capabilities&DenyCapabilityInt > 0 || len(pathCapabilities) == 0 {
+ pathCapabilities = []string{DenyCapability}
+ }
+ return
+}
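+
+// Usage sketch (hedged; acl is assumed to be built from a policy
+// granting "read" and "list" on the glob path "secret/*"):
+//
+//	acl.Capabilities("secret/app") // ["read", "list"] via the glob rule
+//	acl.Capabilities("other/path") // ["deny"], the default with no match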
+
+// AllowOperation is used to check if the given operation is permitted. The
+// first bool indicates if an op is allowed, the second whether sudo privileges
+// exist for that op and path.
+func (a *ACL) AllowOperation(req *logical.Request) (bool, bool) {
+ // Fast-path root
+ if a.root {
+ return true, true
+ }
+ op := req.Operation
+ path := req.Path
+
+ // Help is always allowed
+ if op == logical.HelpOperation {
+ return true, false
+ }
+
+ var permissions *Permissions
+
+ // Find an exact matching rule, look for glob if no match
+ var capabilities uint32
+ raw, ok := a.exactRules.Get(path)
+ if ok {
+ permissions = raw.(*Permissions)
+ capabilities = permissions.CapabilitiesBitmap
+ goto CHECK
+ }
+
+ // Find a glob rule, default deny if no match
+ _, raw, ok = a.globRules.LongestPrefix(path)
+ if !ok {
+ return false, false
+ } else {
+ permissions = raw.(*Permissions)
+ capabilities = permissions.CapabilitiesBitmap
+ }
+
+CHECK:
+ // Check if the minimum permissions are met
+ // If "deny" has been explicitly set, only deny will be in the map, so we
+ // only need to check for the existence of other values
+ sudo := capabilities&SudoCapabilityInt > 0
+ operationAllowed := false
+ switch op {
+ case logical.ReadOperation:
+ operationAllowed = capabilities&ReadCapabilityInt > 0
+ case logical.ListOperation:
+ operationAllowed = capabilities&ListCapabilityInt > 0
+ case logical.UpdateOperation:
+ operationAllowed = capabilities&UpdateCapabilityInt > 0
+ case logical.DeleteOperation:
+ operationAllowed = capabilities&DeleteCapabilityInt > 0
+ case logical.CreateOperation:
+ operationAllowed = capabilities&CreateCapabilityInt > 0
+
+ // These three re-use UpdateCapabilityInt since that's the most appropriate
+ // capability/operation mapping
+ case logical.RevokeOperation, logical.RenewOperation, logical.RollbackOperation:
+ operationAllowed = capabilities&UpdateCapabilityInt > 0
+
+ default:
+ return false, false
+ }
+
+ if !operationAllowed {
+ return false, sudo
+ }
+
+ if permissions.MaxWrappingTTL > 0 {
+ if req.WrapInfo == nil || req.WrapInfo.TTL > permissions.MaxWrappingTTL {
+ return false, sudo
+ }
+ }
+ if permissions.MinWrappingTTL > 0 {
+ if req.WrapInfo == nil || req.WrapInfo.TTL < permissions.MinWrappingTTL {
+ return false, sudo
+ }
+ }
+ // This situation can happen because of merging, even though in a single
+ // path statement we check on ingress
+ if permissions.MinWrappingTTL != 0 &&
+ permissions.MaxWrappingTTL != 0 &&
+ permissions.MaxWrappingTTL < permissions.MinWrappingTTL {
+ return false, sudo
+ }
+
+ // Only check parameter permissions for operations that can modify
+ // parameters.
+ if op == logical.UpdateOperation || op == logical.CreateOperation {
+ // If there are no data fields, allow
+ if len(req.Data) == 0 {
+ return true, sudo
+ }
+
+ if len(permissions.DeniedParameters) == 0 {
+ goto ALLOWED_PARAMETERS
+ }
+
+ // Check if all parameters have been denied
+ if _, ok := permissions.DeniedParameters["*"]; ok {
+ return false, sudo
+ }
+
+ for parameter, value := range req.Data {
+ // Check if the parameter has been explicitly denied
+ if valueSlice, ok := permissions.DeniedParameters[strings.ToLower(parameter)]; ok {
+ // If the value exists in denied values slice, deny
+ if valueInParameterList(value, valueSlice) {
+ return false, sudo
+ }
+ }
+ }
+
+ ALLOWED_PARAMETERS:
+ // If we don't have any allowed parameters set, allow
+ if len(permissions.AllowedParameters) == 0 {
+ return true, sudo
+ }
+
+ _, allowedAll := permissions.AllowedParameters["*"]
+ if len(permissions.AllowedParameters) == 1 && allowedAll {
+ return true, sudo
+ }
+
+ for parameter, value := range req.Data {
+ valueSlice, ok := permissions.AllowedParameters[strings.ToLower(parameter)]
+ // Requested parameter is not in allowed list
+ if !ok && !allowedAll {
+ return false, sudo
+ }
+
+ // If the value doesn't exist in the allowed values slice,
+ // deny
+ if ok && !valueInParameterList(value, valueSlice) {
+ return false, sudo
+ }
+ }
+ }
+
+ return true, sudo
+}
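+
+// A hedged sketch of the parameter checks above, mirroring the test
+// policy below that sets allowed_parameters {"zip": []} on "dev/*": an
+// update request carrying only "zip" passes, while one carrying an
+// unlisted key would be rejected:
+//
+//	req := &logical.Request{
+//		Operation: logical.UpdateOperation,
+//		Path:      "dev/ops",
+//		Data:      map[string]interface{}{"zip": ""},
+//	}
+//	allowed, sudo := acl.AllowOperation(req) // allowed == true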
+
+func valueInParameterList(v interface{}, list []interface{}) bool {
+ // Empty list is equivalent to the item always existing in the list
+ if len(list) == 0 {
+ return true
+ }
+
+ return valueInSlice(v, list)
+}
+
+func valueInSlice(v interface{}, list []interface{}) bool {
+ for _, el := range list {
+ if reflect.TypeOf(el).String() == "string" && reflect.TypeOf(v).String() == "string" {
+ item := el.(string)
+ val := v.(string)
+
+ if strutil.GlobbedStringsMatch(item, val) {
+ return true
+ }
+ } else if reflect.DeepEqual(el, v) {
+ return true
+ }
+ }
+
+ return false
+}
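+
+// For illustration (a hedged sketch): with a denied value list of
+// ["bad*"], valueInParameterList("badge", list) returns true because
+// strutil.GlobbedStringsMatch treats the trailing "*" as a glob, while
+// valueInParameterList("good", list) returns false; and an empty list,
+// as used for bare allowed/denied parameter keys, matches any value.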
diff --git a/vendor/github.com/hashicorp/vault/vault/acl_test.go b/vendor/github.com/hashicorp/vault/vault/acl_test.go
new file mode 100644
index 0000000..7eb45b8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/acl_test.go
@@ -0,0 +1,735 @@
+package vault
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestACL_Capabilities(t *testing.T) {
+ // Create the root policy ACL
+ policy := []*Policy{&Policy{Name: "root"}}
+ acl, err := NewACL(policy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ actual := acl.Capabilities("any/path")
+ expected := []string{"root"}
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
+ }
+
+ policies, err := Parse(aclPolicy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ acl, err = NewACL([]*Policy{policies})
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ actual = acl.Capabilities("dev")
+ expected = []string{"deny"}
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: path:%s\ngot\n%#v\nexpected\n%#v\n", "deny", actual, expected)
+ }
+
+ actual = acl.Capabilities("dev/")
+ expected = []string{"sudo", "read", "list", "update", "delete", "create"}
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: path:%s\ngot\n%#v\nexpected\n%#v\n", "dev/", actual, expected)
+ }
+
+ actual = acl.Capabilities("stage/aws/test")
+ expected = []string{"sudo", "read", "list", "update"}
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: path:%s\ngot\n%#v\nexpected\n%#v\n", "stage/aws/test", actual, expected)
+ }
+
+}
+
+func TestACL_Root(t *testing.T) {
+ // Create the root policy ACL
+ policy := []*Policy{&Policy{Name: "root"}}
+ acl, err := NewACL(policy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ request := new(logical.Request)
+ request.Operation = logical.UpdateOperation
+ request.Path = "sys/mount/foo"
+ allowed, rootPrivs := acl.AllowOperation(request)
+ if !rootPrivs {
+ t.Fatalf("expected root")
+ }
+ if !allowed {
+ t.Fatalf("expected permissions")
+ }
+}
+
+func TestACL_Single(t *testing.T) {
+ policy, err := Parse(aclPolicy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ acl, err := NewACL([]*Policy{policy})
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Type of operation is not important here as we only care about checking
+ // sudo/root
+ request := new(logical.Request)
+ request.Operation = logical.ReadOperation
+ request.Path = "sys/mount/foo"
+ _, rootPrivs := acl.AllowOperation(request)
+ if rootPrivs {
+ t.Fatalf("unexpected root")
+ }
+
+ type tcase struct {
+ op logical.Operation
+ path string
+ allowed bool
+ rootPrivs bool
+ }
+ tcases := []tcase{
+ {logical.ReadOperation, "root", false, false},
+ {logical.HelpOperation, "root", true, false},
+
+ {logical.ReadOperation, "dev/foo", true, true},
+ {logical.UpdateOperation, "dev/foo", true, true},
+
+ {logical.DeleteOperation, "stage/foo", true, false},
+ {logical.ListOperation, "stage/aws/foo", true, true},
+ {logical.UpdateOperation, "stage/aws/foo", true, true},
+ {logical.UpdateOperation, "stage/aws/policy/foo", true, true},
+
+ {logical.DeleteOperation, "prod/foo", false, false},
+ {logical.UpdateOperation, "prod/foo", false, false},
+ {logical.ReadOperation, "prod/foo", true, false},
+ {logical.ListOperation, "prod/foo", true, false},
+ {logical.ReadOperation, "prod/aws/foo", false, false},
+
+ {logical.ReadOperation, "foo/bar", true, true},
+ {logical.ListOperation, "foo/bar", false, true},
+ {logical.UpdateOperation, "foo/bar", false, true},
+ {logical.CreateOperation, "foo/bar", true, true},
+ }
+
+ for _, tc := range tcases {
+ request := new(logical.Request)
+ request.Operation = tc.op
+ request.Path = tc.path
+ allowed, rootPrivs := acl.AllowOperation(request)
+ if allowed != tc.allowed {
+ t.Fatalf("bad: case %#v: %v, %v", tc, allowed, rootPrivs)
+ }
+ if rootPrivs != tc.rootPrivs {
+ t.Fatalf("bad: case %#v: %v, %v", tc, allowed, rootPrivs)
+ }
+ }
+}
+
+func TestACL_Layered(t *testing.T) {
+ policy1, err := Parse(aclPolicy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ policy2, err := Parse(aclPolicy2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ acl, err := NewACL([]*Policy{policy1, policy2})
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ testLayeredACL(t, acl)
+}
+
+func testLayeredACL(t *testing.T, acl *ACL) {
+ // Type of operation is not important here as we only care about checking
+ // sudo/root
+ request := new(logical.Request)
+ request.Operation = logical.ReadOperation
+ request.Path = "sys/mount/foo"
+ _, rootPrivs := acl.AllowOperation(request)
+ if rootPrivs {
+ t.Fatalf("unexpected root")
+ }
+
+ type tcase struct {
+ op logical.Operation
+ path string
+ allowed bool
+ rootPrivs bool
+ }
+ tcases := []tcase{
+ {logical.ReadOperation, "root", false, false},
+ {logical.HelpOperation, "root", true, false},
+
+ {logical.ReadOperation, "dev/foo", true, true},
+ {logical.UpdateOperation, "dev/foo", true, true},
+ {logical.ReadOperation, "dev/hide/foo", false, false},
+ {logical.UpdateOperation, "dev/hide/foo", false, false},
+
+ {logical.DeleteOperation, "stage/foo", true, false},
+ {logical.ListOperation, "stage/aws/foo", true, true},
+ {logical.UpdateOperation, "stage/aws/foo", true, true},
+ {logical.UpdateOperation, "stage/aws/policy/foo", false, false},
+
+ {logical.DeleteOperation, "prod/foo", true, false},
+ {logical.UpdateOperation, "prod/foo", true, false},
+ {logical.ReadOperation, "prod/foo", true, false},
+ {logical.ListOperation, "prod/foo", true, false},
+ {logical.ReadOperation, "prod/aws/foo", false, false},
+
+ {logical.ReadOperation, "sys/status", false, false},
+ {logical.UpdateOperation, "sys/seal", true, true},
+
+ {logical.ReadOperation, "foo/bar", false, false},
+ {logical.ListOperation, "foo/bar", false, false},
+ {logical.UpdateOperation, "foo/bar", false, false},
+ {logical.CreateOperation, "foo/bar", false, false},
+ }
+
+ for _, tc := range tcases {
+ request := new(logical.Request)
+ request.Operation = tc.op
+ request.Path = tc.path
+ allowed, rootPrivs := acl.AllowOperation(request)
+ if allowed != tc.allowed {
+ t.Fatalf("bad: case %#v: %v, %v", tc, allowed, rootPrivs)
+ }
+ if rootPrivs != tc.rootPrivs {
+ t.Fatalf("bad: case %#v: %v, %v", tc, allowed, rootPrivs)
+ }
+ }
+}
+
+func TestACL_PolicyMerge(t *testing.T) {
+ policy, err := Parse(mergingPolicies)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ acl, err := NewACL([]*Policy{policy})
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ type tcase struct {
+ path string
+ minWrappingTTL *time.Duration
+ maxWrappingTTL *time.Duration
+ allowed map[string][]interface{}
+ denied map[string][]interface{}
+ }
+
+ createDuration := func(seconds int) *time.Duration {
+ ret := time.Duration(seconds) * time.Second
+ return &ret
+ }
+
+ tcases := []tcase{
+ {"foo/bar", nil, nil, nil, map[string][]interface{}{"zip": []interface{}{}, "baz": []interface{}{}}},
+ {"hello/universe", createDuration(50), createDuration(200), map[string][]interface{}{"foo": []interface{}{}, "bar": []interface{}{}}, nil},
+ {"allow/all", nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}, "test1": []interface{}{"foo"}}, nil},
+ {"allow/all1", nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}, "test1": []interface{}{"foo"}}, nil},
+ {"deny/all", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}},
+ {"deny/all1", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}},
+ {"value/merge", nil, nil, map[string][]interface{}{"test": []interface{}{1, 2, 3, 4}}, map[string][]interface{}{"test": []interface{}{1, 2, 3, 4}}},
+ {"value/empty", nil, nil, map[string][]interface{}{"empty": []interface{}{}}, map[string][]interface{}{"empty": []interface{}{}}},
+ }
+
+ for _, tc := range tcases {
+ raw, ok := acl.exactRules.Get(tc.path)
+ if !ok {
+ t.Fatalf("Could not find acl entry for path %s", tc.path)
+ }
+
+ p := raw.(*Permissions)
+ if !reflect.DeepEqual(tc.allowed, p.AllowedParameters) {
+ t.Fatalf("Allowed paramaters did not match, Expected: %#v, Got: %#v", tc.allowed, p.AllowedParameters)
+ }
+ if !reflect.DeepEqual(tc.denied, p.DeniedParameters) {
+ t.Fatalf("Denied paramaters did not match, Expected: %#v, Got: %#v", tc.denied, p.DeniedParameters)
+ }
+ if tc.minWrappingTTL != nil && *tc.minWrappingTTL != p.MinWrappingTTL {
+ t.Fatalf("Min wrapping TTL did not match, Expected: %#v, Got: %#v", tc.minWrappingTTL, p.MinWrappingTTL)
+ }
+ if tc.minWrappingTTL != nil && *tc.maxWrappingTTL != p.MaxWrappingTTL {
+ t.Fatalf("Max wrapping TTL did not match, Expected: %#v, Got: %#v", tc.maxWrappingTTL, p.MaxWrappingTTL)
+ }
+ }
+}
+
+func TestACL_AllowOperation(t *testing.T) {
+ policy, err := Parse(permissionsPolicy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ acl, err := NewACL([]*Policy{policy})
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ toperations := []logical.Operation{
+ logical.UpdateOperation,
+ logical.CreateOperation,
+ }
+ type tcase struct {
+ path string
+ wrappingTTL *time.Duration
+ parameters []string
+ allowed bool
+ }
+
+ createDuration := func(seconds int) *time.Duration {
+ ret := time.Duration(seconds) * time.Second
+ return &ret
+ }
+
+ tcases := []tcase{
+ {"dev/ops", nil, []string{"zip"}, true},
+ {"foo/bar", nil, []string{"zap"}, false},
+ {"foo/bar", nil, []string{"zip"}, false},
+ {"foo/bar", createDuration(50), []string{"zip"}, false},
+ {"foo/bar", createDuration(450), []string{"zip"}, false},
+ {"foo/bar", createDuration(350), []string{"zip"}, true},
+ {"foo/baz", nil, []string{"hello"}, false},
+ {"foo/baz", createDuration(50), []string{"hello"}, false},
+ {"foo/baz", createDuration(450), []string{"hello"}, true},
+ {"foo/baz", nil, []string{"zap"}, false},
+ {"broken/phone", nil, []string{"steve"}, false},
+ {"working/phone", nil, []string{""}, false},
+ {"working/phone", createDuration(450), []string{""}, false},
+ {"working/phone", createDuration(350), []string{""}, true},
+ {"hello/world", nil, []string{"one"}, false},
+ {"tree/fort", nil, []string{"one"}, true},
+ {"tree/fort", nil, []string{"foo"}, false},
+ {"fruit/apple", nil, []string{"pear"}, false},
+ {"fruit/apple", nil, []string{"one"}, false},
+ {"cold/weather", nil, []string{"four"}, true},
+ {"var/aws", nil, []string{"cold", "warm", "kitty"}, false},
+ }
+
+ for _, tc := range tcases {
+ request := logical.Request{Path: tc.path, Data: make(map[string]interface{})}
+ for _, parameter := range tc.parameters {
+ request.Data[parameter] = ""
+ }
+ if tc.wrappingTTL != nil {
+ request.WrapInfo = &logical.RequestWrapInfo{
+ TTL: *tc.wrappingTTL,
+ }
+ }
+ for _, op := range toperations {
+ request.Operation = op
+ allowed, _ := acl.AllowOperation(&request)
+ if allowed != tc.allowed {
+ t.Fatalf("bad: case %#v: %v", tc, allowed)
+ }
+ }
+ }
+}
+
+func TestACL_ValuePermissions(t *testing.T) {
+ policy, err := Parse(valuePermissionsPolicy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ acl, err := NewACL([]*Policy{policy})
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ toperations := []logical.Operation{
+ logical.UpdateOperation,
+ logical.CreateOperation,
+ }
+ type tcase struct {
+ path string
+ parameters []string
+ values []interface{}
+ allowed bool
+ }
+
+ tcases := []tcase{
+ {"dev/ops", []string{"allow"}, []interface{}{"good"}, true},
+ {"dev/ops", []string{"allow"}, []interface{}{"bad"}, false},
+ {"foo/bar", []string{"deny"}, []interface{}{"bad"}, false},
+ {"foo/bar", []string{"deny"}, []interface{}{"bad glob"}, false},
+ {"foo/bar", []string{"deny"}, []interface{}{"good"}, true},
+ {"foo/bar", []string{"allow"}, []interface{}{"good"}, true},
+ {"foo/baz", []string{"aLLow"}, []interface{}{"good"}, true},
+ {"foo/baz", []string{"deny"}, []interface{}{"bad"}, false},
+ {"foo/baz", []string{"deny"}, []interface{}{"good"}, false},
+ {"foo/baz", []string{"allow", "deny"}, []interface{}{"good", "bad"}, false},
+ {"foo/baz", []string{"deny", "allow"}, []interface{}{"good", "bad"}, false},
+ {"foo/baz", []string{"deNy", "allow"}, []interface{}{"bad", "good"}, false},
+ {"foo/baz", []string{"aLLow"}, []interface{}{"bad"}, false},
+ {"foo/baz", []string{"Neither"}, []interface{}{"bad"}, false},
+ {"fizz/buzz", []string{"allow_multi"}, []interface{}{"good"}, true},
+ {"fizz/buzz", []string{"allow_multi"}, []interface{}{"good1"}, true},
+ {"fizz/buzz", []string{"allow_multi"}, []interface{}{"good2"}, true},
+ {"fizz/buzz", []string{"allow_multi"}, []interface{}{"glob good2"}, false},
+ {"fizz/buzz", []string{"allow_multi"}, []interface{}{"glob good3"}, true},
+ {"fizz/buzz", []string{"allow_multi"}, []interface{}{"bad"}, false},
+ {"fizz/buzz", []string{"allow_multi"}, []interface{}{"bad"}, false},
+ {"fizz/buzz", []string{"allow_multi", "allow"}, []interface{}{"good1", "good"}, true},
+ {"fizz/buzz", []string{"deny_multi"}, []interface{}{"bad2"}, false},
+ {"fizz/buzz", []string{"deny_multi", "allow_multi"}, []interface{}{"good", "good2"}, false},
+ // {"test/types", []string{"array"}, []interface{}{[1]string{"good"}}, true},
+ {"test/types", []string{"map"}, []interface{}{map[string]interface{}{"good": "one"}}, true},
+ {"test/types", []string{"map"}, []interface{}{map[string]interface{}{"bad": "one"}}, false},
+ {"test/types", []string{"int"}, []interface{}{1}, true},
+ {"test/types", []string{"int"}, []interface{}{3}, false},
+ {"test/types", []string{"bool"}, []interface{}{false}, true},
+ {"test/types", []string{"bool"}, []interface{}{true}, false},
+ {"test/star", []string{"anything"}, []interface{}{true}, true},
+ {"test/star", []string{"foo"}, []interface{}{true}, true},
+ {"test/star", []string{"bar"}, []interface{}{false}, true},
+ {"test/star", []string{"bar"}, []interface{}{true}, false},
+ }
+
+ for _, tc := range tcases {
+ request := logical.Request{Path: tc.path, Data: make(map[string]interface{})}
+ for i, parameter := range tc.parameters {
+ request.Data[parameter] = tc.values[i]
+ }
+ for _, op := range toperations {
+ request.Operation = op
+ allowed, _ := acl.AllowOperation(&request)
+ if allowed != tc.allowed {
+ t.Fatalf("bad: case %#v: %v", tc, allowed)
+ }
+ }
+ }
+}
+
+var tokenCreationPolicy = `
+name = "tokenCreation"
+path "auth/token/create*" {
+ capabilities = ["update", "create", "sudo"]
+}
+`
+
+var aclPolicy = `
+name = "dev"
+path "dev/*" {
+ policy = "sudo"
+}
+path "stage/*" {
+ policy = "write"
+}
+path "stage/aws/*" {
+ policy = "read"
+ capabilities = ["update", "sudo"]
+}
+path "stage/aws/policy/*" {
+ policy = "sudo"
+}
+path "prod/*" {
+ policy = "read"
+}
+path "prod/aws/*" {
+ policy = "deny"
+}
+path "sys/*" {
+ policy = "deny"
+}
+path "foo/bar" {
+ capabilities = ["read", "create", "sudo"]
+}
+`
+
+var aclPolicy2 = `
+name = "ops"
+path "dev/hide/*" {
+ policy = "deny"
+}
+path "stage/aws/policy/*" {
+ policy = "deny"
+ # This should have no effect
+ capabilities = ["read", "update", "sudo"]
+}
+path "prod/*" {
+ policy = "write"
+}
+path "sys/seal" {
+ policy = "sudo"
+}
+path "foo/bar" {
+ capabilities = ["deny"]
+}
+`
+
+// Policies used for merge testing
+var mergingPolicies = `
+name = "ops"
+path "foo/bar" {
+ policy = "write"
+ denied_parameters = {
+ "baz" = []
+ }
+}
+path "foo/bar" {
+ policy = "write"
+ denied_parameters = {
+ "zip" = []
+ }
+}
+path "hello/universe" {
+ policy = "write"
+ allowed_parameters = {
+ "foo" = []
+ }
+ max_wrapping_ttl = 300
+ min_wrapping_ttl = 100
+}
+path "hello/universe" {
+ policy = "write"
+ allowed_parameters = {
+ "bar" = []
+ }
+ max_wrapping_ttl = 200
+ min_wrapping_ttl = 50
+}
+path "allow/all" {
+ policy = "write"
+ allowed_parameters = {
+ "test" = []
+ "test1" = ["foo"]
+ }
+}
+path "allow/all" {
+ policy = "write"
+ allowed_parameters = {
+ "*" = []
+ }
+}
+path "allow/all1" {
+ policy = "write"
+ allowed_parameters = {
+ "*" = []
+ }
+}
+path "allow/all1" {
+ policy = "write"
+ allowed_parameters = {
+ "test" = []
+ "test1" = ["foo"]
+ }
+}
+path "deny/all" {
+ policy = "write"
+ denied_parameters = {
+ "test" = []
+ }
+}
+path "deny/all" {
+ policy = "write"
+ denied_parameters = {
+ "*" = []
+ }
+}
+path "deny/all1" {
+ policy = "write"
+ denied_parameters = {
+ "*" = []
+ }
+}
+path "deny/all1" {
+ policy = "write"
+ denied_parameters = {
+ "test" = []
+ }
+}
+path "value/merge" {
+ policy = "write"
+ allowed_parameters = {
+ "test" = [1, 2]
+ }
+ denied_parameters = {
+ "test" = [1, 2]
+ }
+}
+path "value/merge" {
+ policy = "write"
+ allowed_parameters = {
+ "test" = [3, 4]
+ }
+ denied_parameters = {
+ "test" = [3, 4]
+ }
+}
+path "value/empty" {
+ policy = "write"
+ allowed_parameters = {
+ "empty" = []
+ }
+ denied_parameters = {
+ "empty" = [1]
+ }
+}
+path "value/empty" {
+ policy = "write"
+ allowed_parameters = {
+ "empty" = [1]
+ }
+ denied_parameters = {
+ "empty" = []
+ }
+}
+`
+
+// Policies used for AllowOperation testing
+var permissionsPolicy = `
+name = "dev"
+path "dev/*" {
+ policy = "write"
+
+ allowed_parameters = {
+ "zip" = []
+ }
+}
+path "foo/bar" {
+ policy = "write"
+ denied_parameters = {
+ "zap" = []
+ }
+ min_wrapping_ttl = 300
+ max_wrapping_ttl = 400
+}
+path "foo/baz" {
+ policy = "write"
+ allowed_parameters = {
+ "hello" = []
+ }
+ denied_parameters = {
+ "zap" = []
+ }
+ min_wrapping_ttl = 300
+}
+path "working/phone" {
+ policy = "write"
+ max_wrapping_ttl = 400
+}
+path "broken/phone" {
+ policy = "write"
+ allowed_parameters = {
+ "steve" = []
+ }
+ denied_parameters = {
+ "steve" = []
+ }
+}
+path "hello/world" {
+ policy = "write"
+ allowed_parameters = {
+ "*" = []
+ }
+ denied_parameters = {
+ "*" = []
+ }
+}
+path "tree/fort" {
+ policy = "write"
+ allowed_parameters = {
+ "*" = []
+ }
+ denied_parameters = {
+ "foo" = []
+ }
+}
+path "fruit/apple" {
+ policy = "write"
+ allowed_parameters = {
+ "pear" = []
+ }
+ denied_parameters = {
+ "*" = []
+ }
+}
+path "cold/weather" {
+ policy = "write"
+ allowed_parameters = {}
+ denied_parameters = {}
+}
+path "var/aws" {
+ policy = "write"
+ allowed_parameters = {
+ "*" = []
+ }
+ denied_parameters = {
+ "soft" = []
+ "warm" = []
+ "kitty" = []
+ }
+}
+`
+
+// Policies used for value permission testing
+var valuePermissionsPolicy = `
+name = "op"
+path "dev/*" {
+ policy = "write"
+
+ allowed_parameters = {
+ "allow" = ["good"]
+ }
+}
+path "foo/bar" {
+ policy = "write"
+ denied_parameters = {
+ "deny" = ["bad*"]
+ }
+}
+path "foo/baz" {
+ policy = "write"
+ allowed_parameters = {
+ "ALLOW" = ["good"]
+ }
+ denied_parameters = {
+ "dEny" = ["bad"]
+ }
+}
+path "fizz/buzz" {
+ policy = "write"
+ allowed_parameters = {
+ "allow_multi" = ["good", "good1", "good2", "*good3"]
+ "allow" = ["good"]
+ }
+ denied_parameters = {
+ "deny_multi" = ["bad", "bad1", "bad2"]
+ }
+}
+path "test/types" {
+ policy = "write"
+ allowed_parameters = {
+ "map" = [{"good" = "one"}]
+ "int" = [1, 2]
+ "bool" = [false]
+ }
+ denied_parameters = {
+ }
+}
+path "test/star" {
+ policy = "write"
+ allowed_parameters = {
+ "*" = []
+ "foo" = []
+ "bar" = [false]
+ }
+ denied_parameters = {
+ }
+}
+`
diff --git a/vendor/github.com/hashicorp/vault/vault/audit.go b/vendor/github.com/hashicorp/vault/vault/audit.go
new file mode 100644
index 0000000..9391843
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/audit.go
@@ -0,0 +1,565 @@
+package vault
+
+import (
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/armon/go-metrics"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+)
+
+const (
+ // coreAuditConfigPath is used to store the audit configuration.
+ // Audit configuration is protected within the Vault itself, which means it
+ // can only be viewed or modified after an unseal.
+ coreAuditConfigPath = "core/audit"
+
+ // coreLocalAuditConfigPath is used to store audit information for local
+ // (non-replicated) mounts
+ coreLocalAuditConfigPath = "core/local-audit"
+
+ // auditBarrierPrefix is the prefix to the UUID used in the
+ // barrier view for the audit backends.
+ auditBarrierPrefix = "audit/"
+
+ // auditTableType is the value we expect to find for the audit table and
+ // corresponding entries
+ auditTableType = "audit"
+)
+
+var (
+ // errLoadAuditFailed is returned if loading the audit tables encounters an error
+ errLoadAuditFailed = errors.New("failed to setup audit table")
+)
+
+// enableAudit is used to enable a new audit backend
+func (c *Core) enableAudit(entry *MountEntry) error {
+ // Ensure we end the path in a slash
+ if !strings.HasSuffix(entry.Path, "/") {
+ entry.Path += "/"
+ }
+
+ // Ensure there is a name
+ if entry.Path == "/" {
+ return fmt.Errorf("backend path must be specified")
+ }
+
+ // Update the audit table
+ c.auditLock.Lock()
+ defer c.auditLock.Unlock()
+
+ // Look for matching name
+ for _, ent := range c.audit.Entries {
+ switch {
+ // Existing is sql/mysql/ new is sql/ or
+ // existing is sql/ and new is sql/mysql/
+ case strings.HasPrefix(ent.Path, entry.Path):
+ fallthrough
+ case strings.HasPrefix(entry.Path, ent.Path):
+ return fmt.Errorf("path already in use")
+ }
+ }
+
+ // Generate a new UUID and view
+ if entry.UUID == "" {
+ entryUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ return err
+ }
+ entry.UUID = entryUUID
+ }
+ viewPath := auditBarrierPrefix + entry.UUID + "/"
+ view := NewBarrierView(c.barrier, viewPath)
+
+ // Lookup the new backend
+ backend, err := c.newAuditBackend(entry, view, entry.Options)
+ if err != nil {
+ return err
+ }
+ if backend == nil {
+ return fmt.Errorf("nil audit backend of type %q returned from factory", entry.Type)
+ }
+
+ newTable := c.audit.shallowClone()
+ newTable.Entries = append(newTable.Entries, entry)
+ if err := c.persistAudit(newTable, entry.Local); err != nil {
+ return errors.New("failed to update audit table")
+ }
+
+ c.audit = newTable
+
+ // Register the backend
+ c.auditBroker.Register(entry.Path, backend, view)
+ if c.logger.IsInfo() {
+ c.logger.Info("core: enabled audit backend", "path", entry.Path, "type", entry.Type)
+ }
+ return nil
+}
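+
+// A minimal usage sketch (hedged): the "file" type and its "file_path"
+// option mirror the file audit backend's configuration and are shown
+// here as an assumption, not something verified by this package:
+//
+//	entry := &MountEntry{
+//		Table:   auditTableType,
+//		Path:    "file",
+//		Type:    "file",
+//		Options: map[string]string{"file_path": "/var/log/vault_audit.log"},
+//	}
+//	err := c.enableAudit(entry) // registers the backend at "file/"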
+
+// disableAudit is used to disable an existing audit backend
+func (c *Core) disableAudit(path string) (bool, error) {
+ // Ensure we end the path in a slash
+ if !strings.HasSuffix(path, "/") {
+ path += "/"
+ }
+
+ // Remove the entry from the mount table
+ c.auditLock.Lock()
+ defer c.auditLock.Unlock()
+
+ newTable := c.audit.shallowClone()
+ entry := newTable.remove(path)
+
+ // Ensure there was a match
+ if entry == nil {
+ return false, fmt.Errorf("no matching backend")
+ }
+
+ c.removeAuditReloadFunc(entry)
+
+ // When unmounting all entries, the JSON code will load back up from storage
+ // as a nil slice, which kills tests; just set it to nil explicitly.
+ if len(newTable.Entries) == 0 {
+ newTable.Entries = nil
+ }
+
+ // Update the audit table
+ if err := c.persistAudit(newTable, entry.Local); err != nil {
+ return true, errors.New("failed to update audit table")
+ }
+
+ c.audit = newTable
+
+ // Unmount the backend
+ c.auditBroker.Deregister(path)
+ if c.logger.IsInfo() {
+ c.logger.Info("core: disabled audit backend", "path", path)
+ }
+
+ return true, nil
+}
+
+// loadAudits is invoked as part of postUnseal to load the audit table
+func (c *Core) loadAudits() error {
+ auditTable := &MountTable{}
+ localAuditTable := &MountTable{}
+
+ // Load the existing audit table
+ raw, err := c.barrier.Get(coreAuditConfigPath)
+ if err != nil {
+ c.logger.Error("core: failed to read audit table", "error", err)
+ return errLoadAuditFailed
+ }
+ rawLocal, err := c.barrier.Get(coreLocalAuditConfigPath)
+ if err != nil {
+ c.logger.Error("core: failed to read local audit table", "error", err)
+ return errLoadAuditFailed
+ }
+
+ c.auditLock.Lock()
+ defer c.auditLock.Unlock()
+
+ if raw != nil {
+ if err := jsonutil.DecodeJSON(raw.Value, auditTable); err != nil {
+ c.logger.Error("core: failed to decode audit table", "error", err)
+ return errLoadAuditFailed
+ }
+ c.audit = auditTable
+ }
+ if rawLocal != nil {
+ if err := jsonutil.DecodeJSON(rawLocal.Value, localAuditTable); err != nil {
+ c.logger.Error("core: failed to decode local audit table", "error", err)
+ return errLoadAuditFailed
+ }
+ c.audit.Entries = append(c.audit.Entries, localAuditTable.Entries...)
+ }
+
+ // Done if we have restored the audit table
+ if c.audit != nil {
+ needPersist := false
+
+ // Upgrade to typed auth table
+ if c.audit.Type == "" {
+ c.audit.Type = auditTableType
+ needPersist = true
+ }
+
+ // Upgrade to table-scoped entries
+ for _, entry := range c.audit.Entries {
+ if entry.Table == "" {
+ entry.Table = c.audit.Type
+ needPersist = true
+ }
+ }
+
+ if !needPersist {
+ return nil
+ }
+ } else {
+ c.audit = defaultAuditTable()
+ }
+
+ if err := c.persistAudit(c.audit, false); err != nil {
+ return errLoadAuditFailed
+ }
+ return nil
+}
+
+// persistAudit is used to persist the audit table after modification
+func (c *Core) persistAudit(table *MountTable, localOnly bool) error {
+ if table.Type != auditTableType {
+ c.logger.Error("core: given table to persist has wrong type", "actual_type", table.Type, "expected_type", auditTableType)
+ return fmt.Errorf("invalid table type given, not persisting")
+ }
+
+ for _, entry := range table.Entries {
+ if entry.Table != table.Type {
+ c.logger.Error("core: given entry to persist in audit table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type)
+ return fmt.Errorf("invalid audit entry found, not persisting")
+ }
+ }
+
+ nonLocalAudit := &MountTable{
+ Type: auditTableType,
+ }
+
+ localAudit := &MountTable{
+ Type: auditTableType,
+ }
+
+ for _, entry := range table.Entries {
+ if entry.Local {
+ localAudit.Entries = append(localAudit.Entries, entry)
+ } else {
+ nonLocalAudit.Entries = append(nonLocalAudit.Entries, entry)
+ }
+ }
+
+ if !localOnly {
+ // Marshal the table
+ compressedBytes, err := jsonutil.EncodeJSONAndCompress(nonLocalAudit, nil)
+ if err != nil {
+ c.logger.Error("core: failed to encode and/or compress audit table", "error", err)
+ return err
+ }
+
+ // Create an entry
+ entry := &Entry{
+ Key: coreAuditConfigPath,
+ Value: compressedBytes,
+ }
+
+ // Write to the physical backend
+ if err := c.barrier.Put(entry); err != nil {
+ c.logger.Error("core: failed to persist audit table", "error", err)
+ return err
+ }
+ }
+
+ // Repeat with local audit
+ compressedBytes, err := jsonutil.EncodeJSONAndCompress(localAudit, nil)
+ if err != nil {
+ c.logger.Error("core: failed to encode and/or compress local audit table", "error", err)
+ return err
+ }
+
+ entry := &Entry{
+ Key: coreLocalAuditConfigPath,
+ Value: compressedBytes,
+ }
+
+ if err := c.barrier.Put(entry); err != nil {
+ c.logger.Error("core: failed to persist local audit table", "error", err)
+ return err
+ }
+
+ return nil
+}
+
+// setupAudits is invoked after we've loaded the audit table to
+// initialize the audit backends
+func (c *Core) setupAudits() error {
+ broker := NewAuditBroker(c.logger)
+
+ c.auditLock.Lock()
+ defer c.auditLock.Unlock()
+
+ var successCount int
+
+ for _, entry := range c.audit.Entries {
+ // Create a barrier view using the UUID
+ viewPath := auditBarrierPrefix + entry.UUID + "/"
+ view := NewBarrierView(c.barrier, viewPath)
+
+ // Initialize the backend
+ backend, err := c.newAuditBackend(entry, view, entry.Options)
+ if err != nil {
+ c.logger.Error("core: failed to create audit entry", "path", entry.Path, "error", err)
+ continue
+ }
+ if backend == nil {
+ c.logger.Error("core: created audit entry was nil", "path", entry.Path, "type", entry.Type)
+ continue
+ }
+
+ // Mount the backend
+ broker.Register(entry.Path, backend, view)
+
+ successCount++
+ }
+
+ if len(c.audit.Entries) > 0 && successCount == 0 {
+ return errLoadAuditFailed
+ }
+
+ c.auditBroker = broker
+ return nil
+}
+
+// teardownAudit is used before we seal the vault to reset the audit
+// backends to their unloaded state. This is reversed by loadAudits.
+func (c *Core) teardownAudits() error {
+ c.auditLock.Lock()
+ defer c.auditLock.Unlock()
+
+ if c.audit != nil {
+ for _, entry := range c.audit.Entries {
+ c.removeAuditReloadFunc(entry)
+ }
+ }
+
+ c.audit = nil
+ c.auditBroker = nil
+ return nil
+}
+
+// removeAuditReloadFunc removes the reload func from the working set. The
+// audit lock needs to be held before calling this.
+func (c *Core) removeAuditReloadFunc(entry *MountEntry) {
+ switch entry.Type {
+ case "file":
+ key := "audit_file|" + entry.Path
+ c.reloadFuncsLock.Lock()
+
+ if c.logger.IsDebug() {
+ c.logger.Debug("audit: removing reload function", "path", entry.Path)
+ }
+
+ delete(c.reloadFuncs, key)
+
+ c.reloadFuncsLock.Unlock()
+ }
+}
+
+// newAuditBackend is used to create and configure a new audit backend by name
+func (c *Core) newAuditBackend(entry *MountEntry, view logical.Storage, conf map[string]string) (audit.Backend, error) {
+ f, ok := c.auditBackends[entry.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown backend type: %s", entry.Type)
+ }
+ salter, err := salt.NewSalt(view, &salt.Config{
+ HMAC: sha256.New,
+ HMACType: "hmac-sha256",
+ })
+ if err != nil {
+ return nil, fmt.Errorf("core: unable to generate salt: %v", err)
+ }
+
+ be, err := f(&audit.BackendConfig{
+ Salt: salter,
+ Config: conf,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if be == nil {
+ return nil, fmt.Errorf("nil backend returned from %q factory function", entry.Type)
+ }
+
+ switch entry.Type {
+ case "file":
+ key := "audit_file|" + entry.Path
+
+ c.reloadFuncsLock.Lock()
+
+ if c.logger.IsDebug() {
+ c.logger.Debug("audit: adding reload function", "path", entry.Path)
+ }
+
+ c.reloadFuncs[key] = append(c.reloadFuncs[key], func(map[string]string) error {
+ if c.logger.IsInfo() {
+ c.logger.Info("audit: reloading file audit backend", "path", entry.Path)
+ }
+ return be.Reload()
+ })
+
+ c.reloadFuncsLock.Unlock()
+ }
+
+ return be, err
+}
+
+// defaultAuditTable creates a default audit table
+func defaultAuditTable() *MountTable {
+ table := &MountTable{
+ Type: auditTableType,
+ }
+ return table
+}
+
+type backendEntry struct {
+ backend audit.Backend
+ view *BarrierView
+}
+
+// AuditBroker is used to provide a single ingest interface to auditable
+// events given that multiple backends may be configured.
+type AuditBroker struct {
+ sync.RWMutex
+ backends map[string]backendEntry
+ logger log.Logger
+}
+
+// NewAuditBroker creates a new audit broker
+func NewAuditBroker(log log.Logger) *AuditBroker {
+ b := &AuditBroker{
+ backends: make(map[string]backendEntry),
+ logger: log,
+ }
+ return b
+}
+
+// Register is used to add new audit backend to the broker
+func (a *AuditBroker) Register(name string, b audit.Backend, v *BarrierView) {
+ a.Lock()
+ defer a.Unlock()
+ a.backends[name] = backendEntry{
+ backend: b,
+ view: v,
+ }
+}
+
+// Deregister is used to remove an audit backend from the broker
+func (a *AuditBroker) Deregister(name string) {
+ a.Lock()
+ defer a.Unlock()
+ delete(a.backends, name)
+}
+
+// IsRegistered is used to check if a given audit backend is registered
+func (a *AuditBroker) IsRegistered(name string) bool {
+ a.RLock()
+ defer a.RUnlock()
+ _, ok := a.backends[name]
+ return ok
+}
+
+// GetHash returns a hash using the salt of the given backend
+func (a *AuditBroker) GetHash(name string, input string) (string, error) {
+ a.RLock()
+ defer a.RUnlock()
+ be, ok := a.backends[name]
+ if !ok {
+ return "", fmt.Errorf("unknown audit backend %s", name)
+ }
+
+ return be.backend.GetHash(input), nil
+}
+
+// LogRequest is used to ensure all the audit backends have an opportunity to
+// log the given request and that *at least one* succeeds.
+func (a *AuditBroker) LogRequest(auth *logical.Auth, req *logical.Request, headersConfig *AuditedHeadersConfig, outerErr error) (retErr error) {
+ defer metrics.MeasureSince([]string{"audit", "log_request"}, time.Now())
+ a.RLock()
+ defer a.RUnlock()
+ defer func() {
+ if r := recover(); r != nil {
+ a.logger.Error("audit: panic during logging", "request_path", req.Path, "error", r)
+ retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log"))
+ }
+ }()
+
+ // All logged requests must have an identifier
+ //if req.ID == "" {
+ // a.logger.Error("audit: missing identifier in request object", "request_path", req.Path)
+ // retErr = multierror.Append(retErr, fmt.Errorf("missing identifier in request object: %s", req.Path))
+ // return
+ //}
+
+ headers := req.Headers
+ defer func() {
+ req.Headers = headers
+ }()
+
+ // Ensure at least one backend logs
+ anyLogged := false
+ for name, be := range a.backends {
+ req.Headers = nil
+ req.Headers = headersConfig.ApplyConfig(headers, be.backend.GetHash)
+
+ start := time.Now()
+ err := be.backend.LogRequest(auth, req, outerErr)
+ metrics.MeasureSince([]string{"audit", name, "log_request"}, start)
+ if err != nil {
+ a.logger.Error("audit: backend failed to log request", "backend", name, "error", err)
+ } else {
+ anyLogged = true
+ }
+ }
+ if !anyLogged && len(a.backends) > 0 {
+ retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the request"))
+ return
+ }
+ return nil
+}
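+
+// Broker usage sketch (hedged; logger, the backend, and the view are
+// assumed to come from the surrounding core setup):
+//
+//	broker := NewAuditBroker(logger)
+//	broker.Register("file/", fileBackend, view)
+//	err := broker.LogRequest(auth, req, headersConfig, nil)
+//	// err is non-nil only when no registered backend logged the request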
+
+// LogResponse is used to ensure all the audit backends have an opportunity to
+// log the given response and that *at least one* succeeds.
+func (a *AuditBroker) LogResponse(auth *logical.Auth, req *logical.Request,
+ resp *logical.Response, headersConfig *AuditedHeadersConfig, err error) (reterr error) {
+ defer metrics.MeasureSince([]string{"audit", "log_response"}, time.Now())
+ a.RLock()
+ defer a.RUnlock()
+ defer func() {
+ if r := recover(); r != nil {
+ a.logger.Error("audit: panic during logging", "request_path", req.Path, "error", r)
+ reterr = fmt.Errorf("panic generating audit log")
+ }
+ }()
+
+ headers := req.Headers
+ defer func() {
+ req.Headers = headers
+ }()
+
+ // Ensure at least one backend logs
+ anyLogged := false
+ for name, be := range a.backends {
+ req.Headers = nil
+ req.Headers = headersConfig.ApplyConfig(headers, be.backend.GetHash)
+
+ start := time.Now()
+ err := be.backend.LogResponse(auth, req, resp, err)
+ metrics.MeasureSince([]string{"audit", name, "log_response"}, start)
+ if err != nil {
+ a.logger.Error("audit: backend failed to log response", "backend", name, "error", err)
+ } else {
+ anyLogged = true
+ }
+ }
+ if !anyLogged && len(a.backends) > 0 {
+ return fmt.Errorf("no audit backend succeeded in logging the response")
+ }
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/audit_test.go b/vendor/github.com/hashicorp/vault/vault/audit_test.go
new file mode 100644
index 0000000..5e97da8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/audit_test.go
@@ -0,0 +1,601 @@
+package vault
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+
+ "errors"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/logical"
+ log "github.com/mgutz/logxi/v1"
+ "github.com/mitchellh/copystructure"
+)
+
+type NoopAudit struct {
+ Config *audit.BackendConfig
+ ReqErr error
+ ReqAuth []*logical.Auth
+ Req []*logical.Request
+ ReqHeaders []map[string][]string
+ ReqErrs []error
+
+ RespErr error
+ RespAuth []*logical.Auth
+ RespReq []*logical.Request
+ Resp []*logical.Response
+ RespErrs []error
+}
+
+func (n *NoopAudit) LogRequest(a *logical.Auth, r *logical.Request, err error) error {
+ n.ReqAuth = append(n.ReqAuth, a)
+ n.Req = append(n.Req, r)
+ n.ReqHeaders = append(n.ReqHeaders, r.Headers)
+ n.ReqErrs = append(n.ReqErrs, err)
+ return n.ReqErr
+}
+
+func (n *NoopAudit) LogResponse(a *logical.Auth, r *logical.Request, re *logical.Response, err error) error {
+ n.RespAuth = append(n.RespAuth, a)
+ n.RespReq = append(n.RespReq, r)
+ n.Resp = append(n.Resp, re)
+ n.RespErrs = append(n.RespErrs, err)
+ return n.RespErr
+}
+
+func (n *NoopAudit) GetHash(data string) string {
+ return n.Config.Salt.GetIdentifiedHMAC(data)
+}
+
+func (n *NoopAudit) Reload() error {
+ return nil
+}
+
+func TestCore_EnableAudit(t *testing.T) {
+ c, keys, _ := TestCoreUnsealed(t)
+ c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ return &NoopAudit{
+ Config: config,
+ }, nil
+ }
+
+ me := &MountEntry{
+ Table: auditTableType,
+ Path: "foo",
+ Type: "noop",
+ }
+ err := c.enableAudit(me)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if !c.auditBroker.IsRegistered("foo/") {
+ t.Fatalf("missing audit backend")
+ }
+
+ conf := &CoreConfig{
+ Physical: c.physical,
+ AuditBackends: make(map[string]audit.Factory),
+ DisableMlock: true,
+ }
+ conf.AuditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ return &NoopAudit{
+ Config: config,
+ }, nil
+ }
+ c2, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for i, key := range keys {
+ unseal, err := TestCoreUnseal(c2, key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if i+1 == len(keys) && !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ }
+
+ // Verify matching audit tables
+ if !reflect.DeepEqual(c.audit, c2.audit) {
+ t.Fatalf("mismatch: %v %v", c.audit, c2.audit)
+ }
+
+ // Check for registration
+ if !c2.auditBroker.IsRegistered("foo/") {
+ t.Fatalf("missing audit backend")
+ }
+}
+
+func TestCore_EnableAudit_MixedFailures(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+ c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ return &NoopAudit{
+ Config: config,
+ }, nil
+ }
+
+ c.auditBackends["fail"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ return nil, fmt.Errorf("failing enabling")
+ }
+
+ c.audit = &MountTable{
+ Type: auditTableType,
+ Entries: []*MountEntry{
+ &MountEntry{
+ Table: auditTableType,
+ Path: "noop/",
+ Type: "noop",
+ UUID: "abcd",
+ },
+ &MountEntry{
+ Table: auditTableType,
+ Path: "noop2/",
+ Type: "noop",
+ UUID: "bcde",
+ },
+ },
+ }
+
+ // Both should set up successfully
+ err := c.setupAudits()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // We expect this to work because the other entry is still valid
+ c.audit.Entries[0].Type = "fail"
+ err = c.setupAudits()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // No audit backend set up successfully, so expect error
+ c.audit.Entries[1].Type = "fail"
+ err = c.setupAudits()
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+// Test that the local table actually gets populated as expected with local
+// entries, and that upon reading the entries from both are recombined
+// correctly
+func TestCore_EnableAudit_Local(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+ c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ return &NoopAudit{
+ Config: config,
+ }, nil
+ }
+
+ c.auditBackends["fail"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ return nil, fmt.Errorf("failing enabling")
+ }
+
+ c.audit = &MountTable{
+ Type: auditTableType,
+ Entries: []*MountEntry{
+ &MountEntry{
+ Table: auditTableType,
+ Path: "noop/",
+ Type: "noop",
+ UUID: "abcd",
+ },
+ &MountEntry{
+ Table: auditTableType,
+ Path: "noop2/",
+ Type: "noop",
+ UUID: "bcde",
+ },
+ },
+ }
+
+ // Both should set up successfully
+ err := c.setupAudits()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rawLocal, err := c.barrier.Get(coreLocalAuditConfigPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if rawLocal == nil {
+ t.Fatal("expected non-nil local audit")
+ }
+ localAuditTable := &MountTable{}
+ if err := jsonutil.DecodeJSON(rawLocal.Value, localAuditTable); err != nil {
+ t.Fatal(err)
+ }
+ if len(localAuditTable.Entries) > 0 {
+ t.Fatalf("expected no entries in local audit table, got %#v", localAuditTable)
+ }
+
+ c.audit.Entries[1].Local = true
+ if err := c.persistAudit(c.audit, false); err != nil {
+ t.Fatal(err)
+ }
+
+ rawLocal, err = c.barrier.Get(coreLocalAuditConfigPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if rawLocal == nil {
+ t.Fatal("expected non-nil local audit")
+ }
+ localAuditTable = &MountTable{}
+ if err := jsonutil.DecodeJSON(rawLocal.Value, localAuditTable); err != nil {
+ t.Fatal(err)
+ }
+ if len(localAuditTable.Entries) != 1 {
+ t.Fatalf("expected one entry in local audit table, got %#v", localAuditTable)
+ }
+
+ oldAudit := c.audit
+ if err := c.loadAudits(); err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(oldAudit, c.audit) {
+ t.Fatalf("expected\n%#v\ngot\n%#v\n", oldAudit, c.audit)
+ }
+
+ if len(c.audit.Entries) != 2 {
+ t.Fatalf("expected two audit entries, got %#v", localAuditTable)
+ }
+}
+
+func TestCore_DisableAudit(t *testing.T) {
+ c, keys, _ := TestCoreUnsealed(t)
+ c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ return &NoopAudit{
+ Config: config,
+ }, nil
+ }
+
+ existed, err := c.disableAudit("foo")
+ if existed && err != nil {
+ t.Fatalf("existed: %v; err: %v", existed, err)
+ }
+
+ me := &MountEntry{
+ Table: auditTableType,
+ Path: "foo",
+ Type: "noop",
+ }
+ err = c.enableAudit(me)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ existed, err = c.disableAudit("foo")
+ if !existed || err != nil {
+ t.Fatalf("existed: %v; err: %v", existed, err)
+ }
+
+ // Check for registration
+ if c.auditBroker.IsRegistered("foo") {
+ t.Fatalf("audit backend present")
+ }
+
+ conf := &CoreConfig{
+ Physical: c.physical,
+ DisableMlock: true,
+ }
+ c2, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for i, key := range keys {
+ unseal, err := TestCoreUnseal(c2, key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if i+1 == len(keys) && !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ }
+
+ // Verify matching mount tables
+ if !reflect.DeepEqual(c.audit, c2.audit) {
+ t.Fatalf("mismatch:\n%#v\n%#v", c.audit, c2.audit)
+ }
+}
+
+func TestCore_DefaultAuditTable(t *testing.T) {
+ c, keys, _ := TestCoreUnsealed(t)
+ verifyDefaultAuditTable(t, c.audit)
+
+ // Verify we have an audit broker
+ if c.auditBroker == nil {
+ t.Fatalf("missing audit broker")
+ }
+
+ // Start a second core with same physical
+ conf := &CoreConfig{
+ Physical: c.physical,
+ DisableMlock: true,
+ }
+ c2, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for i, key := range keys {
+ unseal, err := TestCoreUnseal(c2, key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if i+1 == len(keys) && !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ }
+
+ // Verify matching mount tables
+ if !reflect.DeepEqual(c.audit, c2.audit) {
+ t.Fatalf("mismatch: %v %v", c.audit, c2.audit)
+ }
+}
+
+func TestDefaultAuditTable(t *testing.T) {
+ table := defaultAuditTable()
+ verifyDefaultAuditTable(t, table)
+}
+
+func verifyDefaultAuditTable(t *testing.T, table *MountTable) {
+ if len(table.Entries) != 0 {
+ t.Fatalf("bad: %v", table.Entries)
+ }
+ if table.Type != auditTableType {
+ t.Fatalf("bad: %v", *table)
+ }
+}
+
+func TestAuditBroker_LogRequest(t *testing.T) {
+ l := logformat.NewVaultLogger(log.LevelTrace)
+ b := NewAuditBroker(l)
+ a1 := &NoopAudit{}
+ a2 := &NoopAudit{}
+ b.Register("foo", a1, nil)
+ b.Register("bar", a2, nil)
+
+ auth := &logical.Auth{
+ ClientToken: "foo",
+ Policies: []string{"dev", "ops"},
+ Metadata: map[string]string{
+ "user": "armon",
+ "source": "github",
+ },
+ }
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "sys/mounts",
+ }
+
+ // Copy so we can verify nothing changed
+ authCopyRaw, err := copystructure.Copy(auth)
+ if err != nil {
+ t.Fatal(err)
+ }
+ authCopy := authCopyRaw.(*logical.Auth)
+
+ reqCopyRaw, err := copystructure.Copy(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ reqCopy := reqCopyRaw.(*logical.Request)
+
+ // Create an identifier for the request to verify against
+ req.ID, err = uuid.GenerateUUID()
+ if err != nil {
+ t.Fatalf("failed to generate identifier for the request: path%s err: %v", req.Path, err)
+ }
+ reqCopy.ID = req.ID
+
+ reqErrs := errors.New("errs")
+
+ headersConf := &AuditedHeadersConfig{
+ Headers: make(map[string]*auditedHeaderSettings),
+ }
+
+ err = b.LogRequest(authCopy, reqCopy, headersConf, reqErrs)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ for _, a := range []*NoopAudit{a1, a2} {
+ if !reflect.DeepEqual(a.ReqAuth[0], auth) {
+ t.Fatalf("Bad: %#v", a.ReqAuth[0])
+ }
+ if !reflect.DeepEqual(a.Req[0], req) {
+ t.Fatalf("Bad: %#v\n wanted %#v", a.Req[0], req)
+ }
+ if !reflect.DeepEqual(a.ReqErrs[0], reqErrs) {
+ t.Fatalf("Bad: %#v", a.ReqErrs[0])
+ }
+ }
+
+ // Should still work with one failing backend
+ a1.ReqErr = fmt.Errorf("failed")
+ if err := b.LogRequest(auth, req, headersConf, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should fail when both backends fail
+ a2.ReqErr = fmt.Errorf("failed")
+ if err := b.LogRequest(auth, req, headersConf, nil); !errwrap.Contains(err, "no audit backend succeeded in logging the request") {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAuditBroker_LogResponse(t *testing.T) {
+ l := logformat.NewVaultLogger(log.LevelTrace)
+ b := NewAuditBroker(l)
+ a1 := &NoopAudit{}
+ a2 := &NoopAudit{}
+ b.Register("foo", a1, nil)
+ b.Register("bar", a2, nil)
+
+ auth := &logical.Auth{
+ NumUses: 10,
+ ClientToken: "foo",
+ Policies: []string{"dev", "ops"},
+ Metadata: map[string]string{
+ "user": "armon",
+ "source": "github",
+ },
+ }
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "sys/mounts",
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 1 * time.Hour,
+ },
+ },
+ Data: map[string]interface{}{
+ "user": "root",
+ "password": "password",
+ },
+ }
+ respErr := fmt.Errorf("permission denied")
+
+ // Copy so we can verify nothing changed
+ authCopyRaw, err := copystructure.Copy(auth)
+ if err != nil {
+ t.Fatal(err)
+ }
+ authCopy := authCopyRaw.(*logical.Auth)
+
+ reqCopyRaw, err := copystructure.Copy(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ reqCopy := reqCopyRaw.(*logical.Request)
+
+ respCopyRaw, err := copystructure.Copy(resp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ respCopy := respCopyRaw.(*logical.Response)
+
+ headersConf := &AuditedHeadersConfig{
+ Headers: make(map[string]*auditedHeaderSettings),
+ }
+
+ err = b.LogResponse(authCopy, reqCopy, respCopy, headersConf, respErr)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ for _, a := range []*NoopAudit{a1, a2} {
+ if !reflect.DeepEqual(a.RespAuth[0], auth) {
+ t.Fatalf("Bad: %#v", a.ReqAuth[0])
+ }
+ if !reflect.DeepEqual(a.RespReq[0], req) {
+ t.Fatalf("Bad: %#v", a.Req[0])
+ }
+ if !reflect.DeepEqual(a.Resp[0], resp) {
+ t.Fatalf("Bad: %#v", a.Resp[0])
+ }
+ if !reflect.DeepEqual(a.RespErrs[0], respErr) {
+ t.Fatalf("Bad: %#v", a.RespErrs[0])
+ }
+ }
+
+ // Should still work with one failing backend
+ a1.RespErr = fmt.Errorf("failed")
+ err = b.LogResponse(auth, req, resp, headersConf, respErr)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should fail when both backends fail
+ a2.RespErr = fmt.Errorf("failed")
+ err = b.LogResponse(auth, req, resp, headersConf, respErr)
+ if err.Error() != "no audit backend succeeded in logging the response" {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAuditBroker_AuditHeaders(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ b := NewAuditBroker(logger)
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "headers/")
+ a1 := &NoopAudit{}
+ a2 := &NoopAudit{}
+ b.Register("foo", a1, nil)
+ b.Register("bar", a2, nil)
+
+ auth := &logical.Auth{
+ ClientToken: "foo",
+ Policies: []string{"dev", "ops"},
+ Metadata: map[string]string{
+ "user": "armon",
+ "source": "github",
+ },
+ }
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "sys/mounts",
+ Headers: map[string][]string{
+ "X-Test-Header": []string{"foo"},
+ "X-Vault-Header": []string{"bar"},
+ "Content-Type": []string{"baz"},
+ },
+ }
+ respErr := fmt.Errorf("permission denied")
+
+ // Copy so we can verify nothing changed
+ reqCopyRaw, err := copystructure.Copy(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ reqCopy := reqCopyRaw.(*logical.Request)
+
+ headersConf := &AuditedHeadersConfig{
+ view: view,
+ }
+ headersConf.add("X-Test-Header", false)
+ headersConf.add("X-Vault-Header", false)
+
+ err = b.LogRequest(auth, reqCopy, headersConf, respErr)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ expected := map[string][]string{
+ "x-test-header": []string{"foo"},
+ "x-vault-header": []string{"bar"},
+ }
+
+ for _, a := range []*NoopAudit{a1, a2} {
+ if !reflect.DeepEqual(a.ReqHeaders[0], expected) {
+ t.Fatalf("Bad audited headers: %#v", a.Req[0].Headers)
+ }
+ }
+
+ // Should still work with one failing backend
+ a1.ReqErr = fmt.Errorf("failed")
+ err = b.LogRequest(auth, req, headersConf, respErr)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should fail when both backends fail
+ a2.ReqErr = fmt.Errorf("failed")
+ err = b.LogRequest(auth, req, headersConf, respErr)
+ if !errwrap.Contains(err, "no audit backend succeeded in logging the request") {
+ t.Fatalf("err: %v", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/audited_headers.go b/vendor/github.com/hashicorp/vault/vault/audited_headers.go
new file mode 100644
index 0000000..781c035
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/audited_headers.go
@@ -0,0 +1,156 @@
+package vault
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+// N.B.: While we could use textproto to get the canonical MIME header, HTTP/2
+// requires all headers to be converted to lower case, so we just do that.
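+// For example, a header configured as "X-Forwarded-For" is stored and
+// matched as "x-forwarded-for".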
+
+const (
+ // Key used in the BarrierView to store and retrieve the header config
+ auditedHeadersEntry = "audited-headers"
+ // Path used to create a sub view off of BarrierView
+ auditedHeadersSubPath = "audited-headers-config/"
+)
+
+type auditedHeaderSettings struct {
+ HMAC bool `json:"hmac"`
+}
+
+// AuditedHeadersConfig is used by the Audit Broker to write only approved
+// headers to the audit logs. It uses a BarrierView to persist the settings.
+type AuditedHeadersConfig struct {
+ Headers map[string]*auditedHeaderSettings
+
+ view *BarrierView
+ sync.RWMutex
+}
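+// The config is persisted as JSON under the "audited-headers" view key;
+// header names are lowercased on write, so the stored form looks like:
+//
+//	{"x-test-header": {"hmac": false}, "x-vault-header": {"hmac": true}}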
+
+// add adds or overwrites a header in the config and updates the barrier view
+func (a *AuditedHeadersConfig) add(header string, hmac bool) error {
+ if header == "" {
+ return fmt.Errorf("header value cannot be empty")
+ }
+
+ // Grab a write lock
+ a.Lock()
+ defer a.Unlock()
+
+ if a.Headers == nil {
+ a.Headers = make(map[string]*auditedHeaderSettings, 1)
+ }
+
+ a.Headers[strings.ToLower(header)] = &auditedHeaderSettings{hmac}
+ entry, err := logical.StorageEntryJSON(auditedHeadersEntry, a.Headers)
+ if err != nil {
+ return fmt.Errorf("failed to persist audited headers config: %v", err)
+ }
+
+ if err := a.view.Put(entry); err != nil {
+ return fmt.Errorf("failed to persist audited headers config: %v", err)
+ }
+
+ return nil
+}
+
+// remove deletes a header out of the header config and updates the barrier view
+func (a *AuditedHeadersConfig) remove(header string) error {
+ if header == "" {
+ return fmt.Errorf("header value cannot be empty")
+ }
+
+ // Grab a write lock
+ a.Lock()
+ defer a.Unlock()
+
+ // Nothing to delete
+ if len(a.Headers) == 0 {
+ return nil
+ }
+
+ delete(a.Headers, strings.ToLower(header))
+ entry, err := logical.StorageEntryJSON(auditedHeadersEntry, a.Headers)
+ if err != nil {
+ return fmt.Errorf("failed to persist audited headers config: %v", err)
+ }
+
+ if err := a.view.Put(entry); err != nil {
+ return fmt.Errorf("failed to persist audited headers config: %v", err)
+ }
+
+ return nil
+}
+
+// ApplyConfig returns a map of approved headers and their values, either
+// hmac'ed or plaintext
+func (a *AuditedHeadersConfig) ApplyConfig(headers map[string][]string, hashFunc func(string) string) (result map[string][]string) {
+ // Grab a read lock
+ a.RLock()
+ defer a.RUnlock()
+
+ // Make a copy of the incoming headers with everything lower so we can
+ // case-insensitively compare
+ lowerHeaders := make(map[string][]string, len(headers))
+ for k, v := range headers {
+ lowerHeaders[strings.ToLower(k)] = v
+ }
+
+ result = make(map[string][]string, len(a.Headers))
+ for key, settings := range a.Headers {
+ if val, ok := lowerHeaders[key]; ok {
+ // copy the header values so we don't overwrite them
+ hVals := make([]string, len(val))
+ copy(hVals, val)
+
+ // Optionally hmac the values
+ if settings.HMAC {
+ for i, el := range hVals {
+ hVals[i] = hashFunc(el)
+ }
+ }
+
+ result[key] = hVals
+ }
+ }
+
+ return
+}
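+// A usage sketch (with a hypothetical "X-Api-Key" header and hash function):
+// when "x-api-key" is configured for HMAC, ApplyConfig drops unlisted
+// headers and hashes the listed ones:
+//
+//	conf.add("X-Api-Key", true)
+//	out := conf.ApplyConfig(
+//		map[string][]string{"X-Api-Key": {"secret"}, "Accept": {"*/*"}},
+//		hashFunc,
+//	)
+//	// out == map[string][]string{"x-api-key": {hashFunc("secret")}}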
+
+// setupAuditedHeadersConfig initializes the headers config by loading it
+// from the barrier view
+func (c *Core) setupAuditedHeadersConfig() error {
+ // Create a sub-view
+ view := c.systemBarrierView.SubView(auditedHeadersSubPath)
+
+ // Create the config
+ out, err := view.Get(auditedHeadersEntry)
+ if err != nil {
+ return fmt.Errorf("failed to read config: %v", err)
+ }
+
+ headers := make(map[string]*auditedHeaderSettings)
+ if out != nil {
+ err = out.DecodeJSON(&headers)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Lowercase all header names so lookups are case-insensitive; this is
+ // necessary for the upgrade case, where mixed-case names may have been
+ // persisted
+ lowerHeaders := make(map[string]*auditedHeaderSettings, len(headers))
+ for k, v := range headers {
+ lowerHeaders[strings.ToLower(k)] = v
+ }
+
+ c.auditedHeaders = &AuditedHeadersConfig{
+ Headers: lowerHeaders,
+ view: view,
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go b/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go
new file mode 100644
index 0000000..5e82ec7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go
@@ -0,0 +1,224 @@
+package vault
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/salt"
+)
+
+func mockAuditedHeadersConfig(t *testing.T) *AuditedHeadersConfig {
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "foo/")
+ return &AuditedHeadersConfig{
+ Headers: make(map[string]*auditedHeaderSettings),
+ view: view,
+ }
+}
+
+func TestAuditedHeadersConfig_CRUD(t *testing.T) {
+ conf := mockAuditedHeadersConfig(t)
+
+ testAuditedHeadersConfig_Add(t, conf)
+ testAuditedHeadersConfig_Remove(t, conf)
+}
+
+func testAuditedHeadersConfig_Add(t *testing.T, conf *AuditedHeadersConfig) {
+ err := conf.add("X-Test-Header", false)
+ if err != nil {
+ t.Fatalf("Error when adding header to config: %s", err)
+ }
+
+ settings, ok := conf.Headers["x-test-header"]
+ if !ok {
+ t.Fatal("Expected header to be found in config")
+ }
+
+ if settings.HMAC {
+ t.Fatal("Expected HMAC to be set to false, got true")
+ }
+
+ out, err := conf.view.Get(auditedHeadersEntry)
+ if err != nil {
+ t.Fatalf("Could not retrieve headers entry from config: %s", err)
+ }
+
+ headers := make(map[string]*auditedHeaderSettings)
+ err = out.DecodeJSON(&headers)
+ if err != nil {
+ t.Fatalf("Error decoding header view: %s", err)
+ }
+
+ expected := map[string]*auditedHeaderSettings{
+ "x-test-header": &auditedHeaderSettings{
+ HMAC: false,
+ },
+ }
+
+ if !reflect.DeepEqual(headers, expected) {
+ t.Fatalf("Expected config didn't match actual. Expected: %#v, Got: %#v", expected, headers)
+ }
+
+ err = conf.add("X-Vault-Header", true)
+ if err != nil {
+ t.Fatalf("Error when adding header to config: %s", err)
+ }
+
+ settings, ok = conf.Headers["x-vault-header"]
+ if !ok {
+ t.Fatal("Expected header to be found in config")
+ }
+
+ if !settings.HMAC {
+ t.Fatal("Expected HMAC to be set to true, got false")
+ }
+
+ out, err = conf.view.Get(auditedHeadersEntry)
+ if err != nil {
+ t.Fatalf("Could not retrieve headers entry from config: %s", err)
+ }
+
+ headers = make(map[string]*auditedHeaderSettings)
+ err = out.DecodeJSON(&headers)
+ if err != nil {
+ t.Fatalf("Error decoding header view: %s", err)
+ }
+
+ expected["x-vault-header"] = &auditedHeaderSettings{
+ HMAC: true,
+ }
+
+ if !reflect.DeepEqual(headers, expected) {
+ t.Fatalf("Expected config didn't match actual. Expected: %#v, Got: %#v", expected, headers)
+ }
+
+}
+
+func testAuditedHeadersConfig_Remove(t *testing.T, conf *AuditedHeadersConfig) {
+ err := conf.remove("X-Test-Header")
+ if err != nil {
+ t.Fatalf("Error when adding header to config: %s", err)
+ }
+
+ _, ok := conf.Headers["x-Test-HeAder"]
+ if ok {
+ t.Fatal("Expected header to not be found in config")
+ }
+
+ out, err := conf.view.Get(auditedHeadersEntry)
+ if err != nil {
+ t.Fatalf("Could not retrieve headers entry from config: %s", err)
+ }
+
+ headers := make(map[string]*auditedHeaderSettings)
+ err = out.DecodeJSON(&headers)
+ if err != nil {
+ t.Fatalf("Error decoding header view: %s", err)
+ }
+
+ expected := map[string]*auditedHeaderSettings{
+ "x-vault-header": &auditedHeaderSettings{
+ HMAC: true,
+ },
+ }
+
+ if !reflect.DeepEqual(headers, expected) {
+ t.Fatalf("Expected config didn't match actual. Expected: %#v, Got: %#v", expected, headers)
+ }
+
+ err = conf.remove("x-VaulT-Header")
+ if err != nil {
+ t.Fatalf("Error when adding header to config: %s", err)
+ }
+
+ _, ok = conf.Headers["x-vault-header"]
+ if ok {
+ t.Fatal("Expected header to not be found in config")
+ }
+
+ out, err = conf.view.Get(auditedHeadersEntry)
+ if err != nil {
+ t.Fatalf("Could not retrieve headers entry from config: %s", err)
+ }
+
+ headers = make(map[string]*auditedHeaderSettings)
+ err = out.DecodeJSON(&headers)
+ if err != nil {
+ t.Fatalf("Error decoding header view: %s", err)
+ }
+
+ expected = make(map[string]*auditedHeaderSettings)
+
+ if !reflect.DeepEqual(headers, expected) {
+ t.Fatalf("Expected config didn't match actual. Expected: %#v, Got: %#v", expected, headers)
+ }
+}
+
+func TestAuditedHeadersConfig_ApplyConfig(t *testing.T) {
+ conf := mockAuditedHeadersConfig(t)
+
+ conf.add("X-TesT-Header", false)
+ conf.add("X-Vault-HeAdEr", true)
+
+ reqHeaders := map[string][]string{
+ "X-Test-Header": []string{"foo"},
+ "X-Vault-Header": []string{"bar", "bar"},
+ "Content-Type": []string{"json"},
+ }
+
+ hashFunc := func(s string) string { return "hashed" }
+
+ result := conf.ApplyConfig(reqHeaders, hashFunc)
+
+ expected := map[string][]string{
+ "x-test-header": []string{"foo"},
+ "x-vault-header": []string{"hashed", "hashed"},
+ }
+
+ if !reflect.DeepEqual(result, expected) {
+ t.Fatalf("Expected headers did not match actual: Expected %#v\n Got %#v\n", expected, result)
+ }
+
+ // Make sure we didn't edit the reqHeaders map
+ reqHeadersCopy := map[string][]string{
+ "X-Test-Header": []string{"foo"},
+ "X-Vault-Header": []string{"bar", "bar"},
+ "Content-Type": []string{"json"},
+ }
+
+ if !reflect.DeepEqual(reqHeaders, reqHeadersCopy) {
+ t.Fatalf("Req headers were changed, expected %#v\n got %#v", reqHeadersCopy, reqHeaders)
+ }
+
+}
+
+func BenchmarkAuditedHeaderConfig_ApplyConfig(b *testing.B) {
+ conf := &AuditedHeadersConfig{
+ Headers: make(map[string]*auditedHeaderSettings),
+ view: nil,
+ }
+
+ conf.Headers = map[string]*auditedHeaderSettings{
+ "X-Test-Header": &auditedHeaderSettings{false},
+ "X-Vault-Header": &auditedHeaderSettings{true},
+ }
+
+ reqHeaders := map[string][]string{
+ "X-Test-Header": []string{"foo"},
+ "X-Vault-Header": []string{"bar", "bar"},
+ "Content-Type": []string{"json"},
+ }
+
+ salter, err := salt.NewSalt(nil, nil)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ hashFunc := func(s string) string { return salter.GetIdentifiedHMAC(s) }
+
+ // Reset the timer since we did a lot above
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ conf.ApplyConfig(reqHeaders, hashFunc)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/auth.go b/vendor/github.com/hashicorp/vault/vault/auth.go
new file mode 100644
index 0000000..5a5e68b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/auth.go
@@ -0,0 +1,505 @@
+package vault
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+const (
+ // coreAuthConfigPath is used to store the auth configuration.
+ // Auth configuration is protected within the Vault itself, which means it
+ // can only be viewed or modified after an unseal.
+ coreAuthConfigPath = "core/auth"
+
+ // coreLocalAuthConfigPath is used to store credential configuration for
+ // local (non-replicated) mounts
+ coreLocalAuthConfigPath = "core/local-auth"
+
+ // credentialBarrierPrefix is the prefix to the UUID used in the
+ // barrier view for the credential backends.
+ credentialBarrierPrefix = "auth/"
+
+ // credentialRoutePrefix is the mount prefix used for the router
+ credentialRoutePrefix = "auth/"
+
+ // credentialTableType is the value we expect to find for the credential
+ // table and corresponding entries
+ credentialTableType = "auth"
+)
+
+var (
+ // errLoadAuthFailed if loadCredentials encounters an error
+ errLoadAuthFailed = errors.New("failed to setup auth table")
+
+ // credentialAliases maps old backend names to new backend names, allowing us
+ // to move/rename backends but maintain backwards compatibility
+ credentialAliases = map[string]string{"aws-ec2": "aws"}
+)
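+// For example, a mount entry created with Type "aws-ec2" resolves to the
+// "aws" factory in newCredentialBackend below, while the entry itself keeps
+// its original type string.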
+
+// enableCredential is used to enable a new credential backend
+func (c *Core) enableCredential(entry *MountEntry) error {
+ // Ensure we end the path in a slash
+ if !strings.HasSuffix(entry.Path, "/") {
+ entry.Path += "/"
+ }
+
+ // Ensure there is a name
+ if entry.Path == "/" {
+ return fmt.Errorf("backend path must be specified")
+ }
+
+ c.authLock.Lock()
+ defer c.authLock.Unlock()
+
+ // Look for matching name
+ for _, ent := range c.auth.Entries {
+ switch {
+ // Existing is oauth/github/ new is oauth/ or
+ // existing is oauth/ and new is oauth/github/
+ case strings.HasPrefix(ent.Path, entry.Path):
+ fallthrough
+ case strings.HasPrefix(entry.Path, ent.Path):
+ return logical.CodedError(409, "path is already in use")
+ }
+ }
+
+ // Ensure the token backend is a singleton
+ if entry.Type == "token" {
+ return fmt.Errorf("token credential backend cannot be instantiated")
+ }
+
+ if match := c.router.MatchingMount(credentialRoutePrefix + entry.Path); match != "" {
+ return logical.CodedError(409, fmt.Sprintf("existing mount at %s", match))
+ }
+
+ // Generate a new UUID and view
+ if entry.UUID == "" {
+ entryUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ return err
+ }
+ entry.UUID = entryUUID
+ }
+
+ viewPath := credentialBarrierPrefix + entry.UUID + "/"
+ view := NewBarrierView(c.barrier, viewPath)
+ sysView := c.mountEntrySysView(entry)
+
+ // Create the new backend
+ backend, err := c.newCredentialBackend(entry.Type, sysView, view, nil)
+ if err != nil {
+ return err
+ }
+ if backend == nil {
+ return fmt.Errorf("nil backend returned from %q factory", entry.Type)
+ }
+
+ if err := backend.Initialize(); err != nil {
+ return err
+ }
+
+ // Update the auth table
+ newTable := c.auth.shallowClone()
+ newTable.Entries = append(newTable.Entries, entry)
+ if err := c.persistAuth(newTable, entry.Local); err != nil {
+ return errors.New("failed to update auth table")
+ }
+
+ c.auth = newTable
+
+ path := credentialRoutePrefix + entry.Path
+ if err := c.router.Mount(backend, path, entry, view); err != nil {
+ return err
+ }
+
+ if c.logger.IsInfo() {
+ c.logger.Info("core: enabled credential backend", "path", entry.Path, "type", entry.Type)
+ }
+ return nil
+}
+
+// disableCredential is used to disable an existing credential backend; the
+// boolean indicates if it existed
+func (c *Core) disableCredential(path string) (bool, error) {
+ // Ensure we end the path in a slash
+ if !strings.HasSuffix(path, "/") {
+ path += "/"
+ }
+
+ // Ensure the token backend is not affected
+ if path == "token/" {
+ return true, fmt.Errorf("token credential backend cannot be disabled")
+ }
+
+ // Store the view for this backend
+ fullPath := credentialRoutePrefix + path
+ view := c.router.MatchingStorageView(fullPath)
+ if view == nil {
+ return false, fmt.Errorf("no matching backend %s", fullPath)
+ }
+
+ // Mark the entry as tainted
+ if err := c.taintCredEntry(path); err != nil {
+ return true, err
+ }
+
+ // Taint the router path to prevent routing
+ if err := c.router.Taint(fullPath); err != nil {
+ return true, err
+ }
+
+ // Revoke credentials from this path
+ if err := c.expiration.RevokePrefix(fullPath); err != nil {
+ return true, err
+ }
+
+ // Call cleanup function if it exists
+ backend := c.router.MatchingBackend(fullPath)
+ if backend != nil {
+ backend.Cleanup()
+ }
+
+ // Unmount the backend
+ if err := c.router.Unmount(fullPath); err != nil {
+ return true, err
+ }
+
+ // Clear the data in the view
+ if view != nil {
+ if err := logical.ClearView(view); err != nil {
+ return true, err
+ }
+ }
+
+ // Remove the mount table entry
+ if err := c.removeCredEntry(path); err != nil {
+ return true, err
+ }
+ if c.logger.IsInfo() {
+ c.logger.Info("core: disabled credential backend", "path", path)
+ }
+ return true, nil
+}
+
+// removeCredEntry is used to remove an entry in the auth table
+func (c *Core) removeCredEntry(path string) error {
+ c.authLock.Lock()
+ defer c.authLock.Unlock()
+
+ // Taint the entry from the auth table
+ newTable := c.auth.shallowClone()
+ entry := newTable.remove(path)
+ if entry == nil {
+ c.logger.Error("core: nil entry found removing entry in auth table", "path", path)
+ return logical.CodedError(500, "failed to remove entry in auth table")
+ }
+
+ // Update the auth table
+ if err := c.persistAuth(newTable, entry.Local); err != nil {
+ return errors.New("failed to update auth table")
+ }
+
+ c.auth = newTable
+
+ return nil
+}
+
+// taintCredEntry is used to mark an entry in the auth table as tainted
+func (c *Core) taintCredEntry(path string) error {
+ c.authLock.Lock()
+ defer c.authLock.Unlock()
+
+ // Taint the entry from the auth table
+ // We do this on the original since setting the taint operates
+ // on the entries, which a shallow clone shares anyway
+ entry := c.auth.setTaint(path, true)
+
+ // Ensure there was a match
+ if entry == nil {
+ return fmt.Errorf("no matching backend")
+ }
+
+ // Update the auth table
+ if err := c.persistAuth(c.auth, entry.Local); err != nil {
+ return errors.New("failed to update auth table")
+ }
+
+ return nil
+}
+
+// loadCredentials is invoked as part of postUnseal to load the auth table
+func (c *Core) loadCredentials() error {
+ authTable := &MountTable{}
+ localAuthTable := &MountTable{}
+
+ // Load the existing mount table
+ raw, err := c.barrier.Get(coreAuthConfigPath)
+ if err != nil {
+ c.logger.Error("core: failed to read auth table", "error", err)
+ return errLoadAuthFailed
+ }
+ rawLocal, err := c.barrier.Get(coreLocalAuthConfigPath)
+ if err != nil {
+ c.logger.Error("core: failed to read local auth table", "error", err)
+ return errLoadAuthFailed
+ }
+
+ c.authLock.Lock()
+ defer c.authLock.Unlock()
+
+ if raw != nil {
+ if err := jsonutil.DecodeJSON(raw.Value, authTable); err != nil {
+ c.logger.Error("core: failed to decode auth table", "error", err)
+ return errLoadAuthFailed
+ }
+ c.auth = authTable
+ }
+ if rawLocal != nil {
+ if err := jsonutil.DecodeJSON(rawLocal.Value, localAuthTable); err != nil {
+ c.logger.Error("core: failed to decode local auth table", "error", err)
+ return errLoadAuthFailed
+ }
+ c.auth.Entries = append(c.auth.Entries, localAuthTable.Entries...)
+ }
+
+ // Done if we have restored the auth table
+ if c.auth != nil {
+ needPersist := false
+
+ // Upgrade to typed auth table
+ if c.auth.Type == "" {
+ c.auth.Type = credentialTableType
+ needPersist = true
+ }
+
+ // Upgrade to table-scoped entries
+ for _, entry := range c.auth.Entries {
+ if entry.Table == "" {
+ entry.Table = c.auth.Type
+ needPersist = true
+ }
+ }
+
+ if !needPersist {
+ return nil
+ }
+ } else {
+ c.auth = defaultAuthTable()
+ }
+
+ if err := c.persistAuth(c.auth, false); err != nil {
+ c.logger.Error("core: failed to persist auth table", "error", err)
+ return errLoadAuthFailed
+ }
+ return nil
+}
+
+// persistAuth is used to persist the auth table after modification
+func (c *Core) persistAuth(table *MountTable, localOnly bool) error {
+ if table.Type != credentialTableType {
+ c.logger.Error("core: given table to persist has wrong type", "actual_type", table.Type, "expected_type", credentialTableType)
+ return fmt.Errorf("invalid table type given, not persisting")
+ }
+
+ for _, entry := range table.Entries {
+ if entry.Table != table.Type {
+ c.logger.Error("core: given entry to persist in auth table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type)
+ return fmt.Errorf("invalid auth entry found, not persisting")
+ }
+ }
+
+ nonLocalAuth := &MountTable{
+ Type: credentialTableType,
+ }
+
+ localAuth := &MountTable{
+ Type: credentialTableType,
+ }
+
+ for _, entry := range table.Entries {
+ if entry.Local {
+ localAuth.Entries = append(localAuth.Entries, entry)
+ } else {
+ nonLocalAuth.Entries = append(nonLocalAuth.Entries, entry)
+ }
+ }
+
+ if !localOnly {
+ // Marshal the table
+ compressedBytes, err := jsonutil.EncodeJSONAndCompress(nonLocalAuth, nil)
+ if err != nil {
+ c.logger.Error("core: failed to encode and/or compress auth table", "error", err)
+ return err
+ }
+
+ // Create an entry
+ entry := &Entry{
+ Key: coreAuthConfigPath,
+ Value: compressedBytes,
+ }
+
+ // Write to the physical backend
+ if err := c.barrier.Put(entry); err != nil {
+ c.logger.Error("core: failed to persist auth table", "error", err)
+ return err
+ }
+ }
+
+ // Repeat with local auth
+ compressedBytes, err := jsonutil.EncodeJSONAndCompress(localAuth, nil)
+ if err != nil {
+ c.logger.Error("core: failed to encode and/or compress local auth table", "error", err)
+ return err
+ }
+
+ entry := &Entry{
+ Key: coreLocalAuthConfigPath,
+ Value: compressedBytes,
+ }
+
+ if err := c.barrier.Put(entry); err != nil {
+ c.logger.Error("core: failed to persist local auth table", "error", err)
+ return err
+ }
+
+ return nil
+}
+
+// setupCredentials is invoked after we've loaded the auth table to
+// initialize the credential backends and setup the router
+func (c *Core) setupCredentials() error {
+ var backend logical.Backend
+ var view *BarrierView
+ var err error
+ var persistNeeded bool
+
+ c.authLock.Lock()
+ defer c.authLock.Unlock()
+
+ for _, entry := range c.auth.Entries {
+ // Work around some problematic code that existed in master for a while
+ if strings.HasPrefix(entry.Path, credentialRoutePrefix) {
+ entry.Path = strings.TrimPrefix(entry.Path, credentialRoutePrefix)
+ persistNeeded = true
+ }
+
+ // Create a barrier view using the UUID
+ viewPath := credentialBarrierPrefix + entry.UUID + "/"
+ view = NewBarrierView(c.barrier, viewPath)
+ sysView := c.mountEntrySysView(entry)
+
+ // Initialize the backend
+ backend, err = c.newCredentialBackend(entry.Type, sysView, view, nil)
+ if err != nil {
+ c.logger.Error("core: failed to create credential entry", "path", entry.Path, "error", err)
+ return errLoadAuthFailed
+ }
+ if backend == nil {
+ return fmt.Errorf("nil backend returned from %q factory", entry.Type)
+ }
+
+ if err := backend.Initialize(); err != nil {
+ return err
+ }
+
+ // Mount the backend
+ path := credentialRoutePrefix + entry.Path
+ err = c.router.Mount(backend, path, entry, view)
+ if err != nil {
+ c.logger.Error("core: failed to mount auth entry", "path", entry.Path, "error", err)
+ return errLoadAuthFailed
+ }
+
+ // Ensure the path is tainted if set in the mount table
+ if entry.Tainted {
+ c.router.Taint(path)
+ }
+
+ // Check if this is the token store
+ if entry.Type == "token" {
+ c.tokenStore = backend.(*TokenStore)
+
+ // this is loaded *after* the normal mounts, including cubbyhole
+ c.router.tokenStoreSalt = c.tokenStore.salt
+ c.tokenStore.cubbyholeBackend = c.router.MatchingBackend("cubbyhole/").(*CubbyholeBackend)
+ }
+ }
+
+ if persistNeeded {
+ return c.persistAuth(c.auth, false)
+ }
+
+ return nil
+}
+
+// teardownCredentials is used before we seal the vault to reset the credential
+// backends to their unloaded state. This is reversed by loadCredentials.
+func (c *Core) teardownCredentials() error {
+ c.authLock.Lock()
+ defer c.authLock.Unlock()
+
+ if c.auth != nil {
+ authTable := c.auth.shallowClone()
+ for _, e := range authTable.Entries {
+ backend := c.router.MatchingBackend(credentialRoutePrefix + e.Path)
+ if backend != nil {
+ backend.Cleanup()
+ }
+ }
+ }
+
+ c.auth = nil
+ c.tokenStore = nil
+ return nil
+}
+
+// newCredentialBackend is used to create and configure a new credential backend by name
+func (c *Core) newCredentialBackend(
+ t string, sysView logical.SystemView, view logical.Storage, conf map[string]string) (logical.Backend, error) {
+ if alias, ok := credentialAliases[t]; ok {
+ t = alias
+ }
+ f, ok := c.credentialBackends[t]
+ if !ok {
+ return nil, fmt.Errorf("unknown backend type: %s", t)
+ }
+
+ config := &logical.BackendConfig{
+ StorageView: view,
+ Logger: c.logger,
+ Config: conf,
+ System: sysView,
+ }
+
+ b, err := f(config)
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+// defaultAuthTable creates a default auth table
+func defaultAuthTable() *MountTable {
+ table := &MountTable{
+ Type: credentialTableType,
+ }
+ tokenUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ panic(fmt.Sprintf("could not generate UUID for default auth table token entry: %v", err))
+ }
+ tokenAuth := &MountEntry{
+ Table: credentialTableType,
+ Path: "token/",
+ Type: "token",
+ Description: "token based credentials",
+ UUID: tokenUUID,
+ }
+ table.Entries = append(table.Entries, tokenAuth)
+ return table
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/auth_test.go b/vendor/github.com/hashicorp/vault/vault/auth_test.go
new file mode 100644
index 0000000..bc150e9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/auth_test.go
@@ -0,0 +1,378 @@
+package vault
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestCore_DefaultAuthTable(t *testing.T) {
+ c, keys, _ := TestCoreUnsealed(t)
+ verifyDefaultAuthTable(t, c.auth)
+
+ // Start a second core with same physical
+ conf := &CoreConfig{
+ Physical: c.physical,
+ DisableMlock: true,
+ }
+ c2, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for i, key := range keys {
+ unseal, err := TestCoreUnseal(c2, key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if i+1 == len(keys) && !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ }
+
+ // Verify matching mount tables
+ if !reflect.DeepEqual(c.auth, c2.auth) {
+ t.Fatalf("mismatch: %v %v", c.auth, c2.auth)
+ }
+}
+
+func TestCore_EnableCredential(t *testing.T) {
+ c, keys, _ := TestCoreUnsealed(t)
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return &NoopBackend{}, nil
+ }
+
+ me := &MountEntry{
+ Table: credentialTableType,
+ Path: "foo",
+ Type: "noop",
+ }
+ err := c.enableCredential(me)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ match := c.router.MatchingMount("auth/foo/bar")
+ if match != "auth/foo/" {
+ t.Fatalf("missing mount")
+ }
+
+ conf := &CoreConfig{
+ Physical: c.physical,
+ DisableMlock: true,
+ }
+ c2, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ c2.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return &NoopBackend{}, nil
+ }
+ for i, key := range keys {
+ unseal, err := TestCoreUnseal(c2, key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if i+1 == len(keys) && !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ }
+
+ // Verify matching auth tables
+ if !reflect.DeepEqual(c.auth, c2.auth) {
+ t.Fatalf("mismatch: %v %v", c.auth, c2.auth)
+ }
+}
+
+// Test that the local table actually gets populated as expected with local
+// entries, and that upon reading the entries from both are recombined
+// correctly
+func TestCore_EnableCredential_Local(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return &NoopBackend{}, nil
+ }
+
+ c.auth = &MountTable{
+ Type: credentialTableType,
+ Entries: []*MountEntry{
+ &MountEntry{
+ Table: credentialTableType,
+ Path: "noop/",
+ Type: "noop",
+ UUID: "abcd",
+ },
+ &MountEntry{
+ Table: credentialTableType,
+ Path: "noop2/",
+ Type: "noop",
+ UUID: "bcde",
+ },
+ },
+ }
+
+ // Both should set up successfully
+ err := c.setupCredentials()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rawLocal, err := c.barrier.Get(coreLocalAuthConfigPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if rawLocal == nil {
+ t.Fatal("expected non-nil local credential")
+ }
+ localCredentialTable := &MountTable{}
+ if err := jsonutil.DecodeJSON(rawLocal.Value, localCredentialTable); err != nil {
+ t.Fatal(err)
+ }
+ if len(localCredentialTable.Entries) > 0 {
+ t.Fatalf("expected no entries in local credential table, got %#v", localCredentialTable)
+ }
+
+ c.auth.Entries[1].Local = true
+ if err := c.persistAuth(c.auth, false); err != nil {
+ t.Fatal(err)
+ }
+
+ rawLocal, err = c.barrier.Get(coreLocalAuthConfigPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if rawLocal == nil {
+ t.Fatal("expected non-nil local credential")
+ }
+ localCredentialTable = &MountTable{}
+ if err := jsonutil.DecodeJSON(rawLocal.Value, localCredentialTable); err != nil {
+ t.Fatal(err)
+ }
+ if len(localCredentialTable.Entries) != 1 {
+ t.Fatalf("expected one entry in local credential table, got %#v", localCredentialTable)
+ }
+
+ oldCredential := c.auth
+ if err := c.loadCredentials(); err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(oldCredential, c.auth) {
+ t.Fatalf("expected\n%#v\ngot\n%#v\n", oldCredential, c.auth)
+ }
+
+ if len(c.auth.Entries) != 2 {
+ t.Fatalf("expected two credential entries, got %#v", localCredentialTable)
+ }
+}
+
+func TestCore_EnableCredential_twice_409(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return &NoopBackend{}, nil
+ }
+
+ me := &MountEntry{
+ Table: credentialTableType,
+ Path: "foo",
+ Type: "noop",
+ }
+ err := c.enableCredential(me)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // 2nd should be a 409 error
+ err2 := c.enableCredential(me)
+ switch err2.(type) {
+ case logical.HTTPCodedError:
+ if err2.(logical.HTTPCodedError).Code() != 409 {
+ t.Fatalf("invalid code given")
+ }
+ default:
+ t.Fatalf("expected a different error type")
+ }
+}
+
+func TestCore_EnableCredential_Token(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+ me := &MountEntry{
+ Table: credentialTableType,
+ Path: "foo",
+ Type: "token",
+ }
+ err := c.enableCredential(me)
+ if err.Error() != "token credential backend cannot be instantiated" {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestCore_DisableCredential(t *testing.T) {
+ c, keys, _ := TestCoreUnsealed(t)
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return &NoopBackend{}, nil
+ }
+
+ existed, err := c.disableCredential("foo")
+ if existed || (err != nil && !strings.HasPrefix(err.Error(), "no matching backend")) {
+ t.Fatalf("existed: %v; err: %v", existed, err)
+ }
+
+ me := &MountEntry{
+ Table: credentialTableType,
+ Path: "foo",
+ Type: "noop",
+ }
+ err = c.enableCredential(me)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ existed, err = c.disableCredential("foo")
+ if !existed || err != nil {
+ t.Fatalf("existed: %v; err: %v", existed, err)
+ }
+
+ match := c.router.MatchingMount("auth/foo/bar")
+ if match != "" {
+ t.Fatalf("backend present")
+ }
+
+ conf := &CoreConfig{
+ Physical: c.physical,
+ DisableMlock: true,
+ }
+ c2, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for i, key := range keys {
+ unseal, err := TestCoreUnseal(c2, key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if i+1 == len(keys) && !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ }
+
+ // Verify matching mount tables
+ if !reflect.DeepEqual(c.auth, c2.auth) {
+ t.Fatalf("mismatch: %v %v", c.auth, c2.auth)
+ }
+}
+
+func TestCore_DisableCredential_Protected(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+ existed, err := c.disableCredential("token")
+ if !existed || err.Error() != "token credential backend cannot be disabled" {
+ t.Fatalf("existed: %v; err: %v", existed, err)
+ }
+}
+
+func TestCore_DisableCredential_Cleanup(t *testing.T) {
+ noop := &NoopBackend{
+ Login: []string{"login"},
+ }
+ c, _, _ := TestCoreUnsealed(t)
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return noop, nil
+ }
+
+ me := &MountEntry{
+ Table: credentialTableType,
+ Path: "foo",
+ Type: "noop",
+ }
+ err := c.enableCredential(me)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Store the view
+ view := c.router.MatchingStorageView("auth/foo/")
+
+ // Inject data
+ se := &logical.StorageEntry{
+ Key: "plstodelete",
+ Value: []byte("test"),
+ }
+ if err := view.Put(se); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Generate a new token auth
+ noop.Response = &logical.Response{
+ Auth: &logical.Auth{
+ Policies: []string{"foo"},
+ },
+ }
+ r := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "auth/foo/login",
+ }
+ resp, err := c.HandleRequest(r)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Disable should cleanup
+ existed, err := c.disableCredential("foo")
+ if !existed || err != nil {
+ t.Fatalf("existed: %v; err: %v", existed, err)
+ }
+
+ // Token should be revoked
+ te, err := c.tokenStore.Lookup(resp.Auth.ClientToken)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if te != nil {
+ t.Fatalf("bad: %#v", te)
+ }
+
+ // View should be empty
+ out, err := logical.CollectKeys(view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(out) != 0 {
+ t.Fatalf("bad: %#v", out)
+ }
+}
+
+func TestDefaultAuthTable(t *testing.T) {
+ table := defaultAuthTable()
+ verifyDefaultAuthTable(t, table)
+}
+
+func verifyDefaultAuthTable(t *testing.T, table *MountTable) {
+ if len(table.Entries) != 1 {
+ t.Fatalf("bad: %v", table.Entries)
+ }
+ if table.Type != credentialTableType {
+ t.Fatalf("bad: %v", *table)
+ }
+ for idx, entry := range table.Entries {
+ switch idx {
+ case 0:
+ if entry.Path != "token/" {
+ t.Fatalf("bad: %v", entry)
+ }
+ if entry.Type != "token" {
+ t.Fatalf("bad: %v", entry)
+ }
+ }
+ if entry.Description == "" {
+ t.Fatalf("bad: %v", entry)
+ }
+ if entry.UUID == "" {
+ t.Fatalf("bad: %v", entry)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier.go b/vendor/github.com/hashicorp/vault/vault/barrier.go
new file mode 100644
index 0000000..7c9acc0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/barrier.go
@@ -0,0 +1,179 @@
+package vault
+
+import (
+ "errors"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+var (
+ // ErrBarrierSealed is returned if an operation is performed on
+ // a sealed barrier. No operation is expected to succeed before unsealing
+ ErrBarrierSealed = errors.New("Vault is sealed")
+
+ // ErrBarrierAlreadyInit is returned if the barrier is already
+ // initialized. This prevents a re-initialization.
+ ErrBarrierAlreadyInit = errors.New("Vault is already initialized")
+
+ // ErrBarrierNotInit is returned if a non-initialized barrier
+ // is attempted to be unsealed.
+ ErrBarrierNotInit = errors.New("Vault is not initialized")
+
+ // ErrBarrierInvalidKey is returned if the Unseal key is invalid
+ ErrBarrierInvalidKey = errors.New("Unseal failed, invalid key")
+)
+
+const (
+ // barrierInitPath is the path used to store our init sentinel file
+ barrierInitPath = "barrier/init"
+
+ // keyringPath is the location of the keyring data. This is encrypted
+ // by the master key.
+ keyringPath = "core/keyring"
+
+ // keyringUpgradePrefix is the path used to store keyring update entries.
+ // When running in HA mode, the active instance will install the new key
+ // and re-write the keyring. For standby instances, they need an upgrade
+ // path from key N to N+1. They cannot just use the master key because
+ // in the event of a rekey, that master key can no longer decrypt the keyring.
+ // When key N+1 is installed, we create an entry at "prefix/N" which uses
+ // encryption key N to provide the N+1 key. The standby instances scan
+ // for this periodically and refresh their keyring. The upgrade keys
+ // are deleted after a few minutes, but this provides enough time for the
+ // standby instances to upgrade without causing any disruption.
+ keyringUpgradePrefix = "core/upgrade/"
+
+ // masterKeyPath is the location of the master key. This is encrypted
+ // by the latest key in the keyring. This is only used by standby instances
+ // to handle the case of a rekey. If the active instance does a rekey,
+ // the standby instances can no longer reload the keyring since they
+ // have the old master key. This key can be decrypted if you have the
+ // keyring to discover the new master key. The new master key is then
+ // used to reload the keyring itself.
+ masterKeyPath = "core/master"
+)
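+// To illustrate the upgrade naming: when key term 2 is installed,
+// CreateUpgrade writes "core/upgrade/1", holding key 2 encrypted under
+// key term 1, so standbys can step from term 1 to term 2.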
+
+// SecurityBarrier is a critical component of Vault. It is used to wrap
+// an untrusted physical backend and provide a single point of encryption,
+// decryption and checksum verification. The goal is to ensure that any
+// data written to the barrier is confidential and that integrity is preserved.
+// As a real-world analogy, this is the steel and concrete wrapper around
+// a Vault. The barrier should only be Unlockable given its key.
+type SecurityBarrier interface {
+ // Initialized checks if the barrier has been initialized
+ // and has a master key set.
+ Initialized() (bool, error)
+
+ // Initialize works only if the barrier has not been initialized
+ // and makes use of the given master key.
+ Initialize([]byte) error
+
+ // GenerateKey is used to generate a new key
+ GenerateKey() ([]byte, error)
+
+ // KeyLength is used to sanity check a key
+ KeyLength() (int, int)
+
+ // Sealed checks if the barrier has been unlocked yet. The Barrier
+ // is not expected to be able to perform any CRUD until it is unsealed.
+ Sealed() (bool, error)
+
+ // Unseal is used to provide the master key which permits the barrier
+ // to be unsealed. If the key is not correct, the barrier remains sealed.
+ Unseal(key []byte) error
+
+ // VerifyMaster is used to check if the given key matches the master key
+ VerifyMaster(key []byte) error
+
+ // SetMasterKey is used to directly set a new master key. This is used in
+ // replicated scenarios due to the chicken-and-egg problem of reloading the
+ // keyring from disk before we have the master key to decrypt it.
+ SetMasterKey(key []byte) error
+
+ // ReloadKeyring is used to re-read the underlying keyring.
+ // This is used for HA deployments to ensure the latest keyring
+ // is present in the leader.
+ ReloadKeyring() error
+
+ // ReloadMasterKey is used to re-read the underlying masterkey.
+ // This is used for HA deployments to ensure the latest master key
+ // is available for keyring reloading.
+ ReloadMasterKey() error
+
+ // Seal is used to re-seal the barrier. This requires the barrier to
+ // be unsealed again to perform any further operations.
+ Seal() error
+
+ // Rotate is used to create a new encryption key. All future writes
+ // should use the new key, while old values should still be decryptable.
+ Rotate() (uint32, error)
+
+ // CreateUpgrade creates an upgrade path key to the given term from the previous term
+ CreateUpgrade(term uint32) error
+
+ // DestroyUpgrade destroys the upgrade path key to the given term
+ DestroyUpgrade(term uint32) error
+
+ // CheckUpgrade looks for an upgrade to the current term and installs it
+ CheckUpgrade() (bool, uint32, error)
+
+ // ActiveKeyInfo is used to inform details about the active key
+ ActiveKeyInfo() (*KeyInfo, error)
+
+ // Rekey is used to change the master key used to protect the keyring
+ Rekey([]byte) error
+
+ // For replication we must send over the keyring, so this must be available
+ Keyring() (*Keyring, error)
+
+ // SecurityBarrier must provide the storage APIs
+ BarrierStorage
+
+ // SecurityBarrier must provide the encryption APIs
+ BarrierEncryptor
+}
+
+// BarrierStorage is the storage only interface required for a Barrier.
+type BarrierStorage interface {
+ // Put is used to insert or update an entry
+ Put(entry *Entry) error
+
+ // Get is used to fetch an entry
+ Get(key string) (*Entry, error)
+
+ // Delete is used to permanently delete an entry
+ Delete(key string) error
+
+ // List is used to list all the keys under a given
+ // prefix, up to the next prefix.
+ List(prefix string) ([]string, error)
+}
+
+// BarrierEncryptor is the in memory only interface that does not actually
+// use the underlying barrier. It is used for lower level modules like the
+// Write-Ahead-Log and Merkle index to allow them to use the barrier.
+type BarrierEncryptor interface {
+ Encrypt(key string, plaintext []byte) ([]byte, error)
+ Decrypt(key string, ciphertext []byte) ([]byte, error)
+}
+
+// Entry is used to represent data stored by the security barrier
+type Entry struct {
+ Key string
+ Value []byte
+}
+
+// Logical turns the Entry into a logical storage entry.
+func (e *Entry) Logical() *logical.StorageEntry {
+ return &logical.StorageEntry{
+ Key: e.Key,
+ Value: e.Value,
+ }
+}
+
+// KeyInfo is used to convey information about the encryption key
+type KeyInfo struct {
+ Term int
+ InstallTime time.Time
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm.go b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm.go
new file mode 100644
index 0000000..37c191b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm.go
@@ -0,0 +1,886 @@
+package vault
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "crypto/subtle"
+ "encoding/binary"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/armon/go-metrics"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/physical"
+)
+
+const (
+ // initialKeyTerm is the hard coded initial key term. This is
+ // used only for values that are not encrypted with the keyring.
+ initialKeyTerm = 1
+
+ // termSize the number of bytes used for the key term.
+ termSize = 4
+)
+
+// Versions of the AESGCM storage methodology
+const (
+ AESGCMVersion1 = 0x1
+ AESGCMVersion2 = 0x2
+)
+
+// barrierInit is the JSON encoded value stored
+type barrierInit struct {
+ Version int // Version is the current format version
+ Key []byte // Key is the primary encryption key
+}
+
+// AESGCMBarrier is a SecurityBarrier implementation that uses the AES
+// cipher core and the Galois Counter Mode block mode. It defaults to the
+// Go standard nonce size of 12 bytes and a 256-bit key. AES-GCM is high
+// performance and provides both confidentiality and integrity.
+type AESGCMBarrier struct {
+ backend physical.Backend
+
+ l sync.RWMutex
+ sealed bool
+
+ // keyring is used to maintain all of the encryption keys, including
+ // the active key used for encryption, but also prior keys to allow
+ // decryption of keys encrypted under previous terms.
+ keyring *Keyring
+
+ // cache is used to reduce the number of AEAD constructions we do
+ cache map[uint32]cipher.AEAD
+ cacheLock sync.RWMutex
+
+ // currentAESGCMVersionByte is prefixed to a message to allow for
+ // future versioning of barrier implementations. It's var instead
+ // of const to allow for testing
+ currentAESGCMVersionByte byte
+}
+
+// NewAESGCMBarrier is used to construct a new barrier that uses
+// the provided physical backend for storage.
+func NewAESGCMBarrier(physical physical.Backend) (*AESGCMBarrier, error) {
+ b := &AESGCMBarrier{
+ backend: physical,
+ sealed: true,
+ cache: make(map[uint32]cipher.AEAD),
+ currentAESGCMVersionByte: byte(AESGCMVersion2),
+ }
+ return b, nil
+}
+
+// Initialized checks if the barrier has been initialized
+// and has a master key set.
+func (b *AESGCMBarrier) Initialized() (bool, error) {
+ // Read the keyring file
+ out, err := b.backend.Get(keyringPath)
+ if err != nil {
+ return false, fmt.Errorf("failed to check for initialization: %v", err)
+ }
+ if out != nil {
+ return true, nil
+ }
+
+ // Fallback, check for the old sentinel file
+ out, err = b.backend.Get(barrierInitPath)
+ if err != nil {
+ return false, fmt.Errorf("failed to check for initialization: %v", err)
+ }
+ return out != nil, nil
+}
+
+// Initialize works only if the barrier has not been initialized
+// and makes use of the given master key.
+func (b *AESGCMBarrier) Initialize(key []byte) error {
+ // Verify the key size
+ min, max := b.KeyLength()
+ if len(key) < min || len(key) > max {
+ return fmt.Errorf("Key size must be %d or %d", min, max)
+ }
+
+ // Check if already initialized
+ if alreadyInit, err := b.Initialized(); err != nil {
+ return err
+ } else if alreadyInit {
+ return ErrBarrierAlreadyInit
+ }
+
+ // Generate encryption key
+ encrypt, err := b.GenerateKey()
+ if err != nil {
+ return fmt.Errorf("failed to generate encryption key: %v", err)
+ }
+
+ // Create a new keyring, install the keys
+ keyring := NewKeyring()
+ keyring = keyring.SetMasterKey(key)
+ keyring, err = keyring.AddKey(&Key{
+ Term: 1,
+ Version: 1,
+ Value: encrypt,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to create keyring: %v", err)
+ }
+ return b.persistKeyring(keyring)
+}
+
+// persistKeyring is used to write out the keyring using the
+// master key to encrypt it.
+func (b *AESGCMBarrier) persistKeyring(keyring *Keyring) error {
+ // Create the keyring entry
+ keyringBuf, err := keyring.Serialize()
+ defer memzero(keyringBuf)
+ if err != nil {
+ return fmt.Errorf("failed to serialize keyring: %v", err)
+ }
+
+ // Create the AES-GCM
+ gcm, err := b.aeadFromKey(keyring.MasterKey())
+ if err != nil {
+ return err
+ }
+
+ // Encrypt the barrier init value
+ value := b.encrypt(keyringPath, initialKeyTerm, gcm, keyringBuf)
+
+ // Create the keyring physical entry
+ pe := &physical.Entry{
+ Key: keyringPath,
+ Value: value,
+ }
+ if err := b.backend.Put(pe); err != nil {
+ return fmt.Errorf("failed to persist keyring: %v", err)
+ }
+
+ // Serialize the master key value
+ key := &Key{
+ Term: 1,
+ Version: 1,
+ Value: keyring.MasterKey(),
+ }
+ keyBuf, err := key.Serialize()
+ defer memzero(keyBuf)
+ if err != nil {
+ return fmt.Errorf("failed to serialize master key: %v", err)
+ }
+
+ // Encrypt the master key
+ activeKey := keyring.ActiveKey()
+ aead, err := b.aeadFromKey(activeKey.Value)
+ if err != nil {
+ return err
+ }
+ value = b.encrypt(masterKeyPath, activeKey.Term, aead, keyBuf)
+
+ // Update the masterKeyPath for standby instances
+ pe = &physical.Entry{
+ Key: masterKeyPath,
+ Value: value,
+ }
+ if err := b.backend.Put(pe); err != nil {
+ return fmt.Errorf("failed to persist master key: %v", err)
+ }
+ return nil
+}
+
+// GenerateKey is used to generate a new key
+func (b *AESGCMBarrier) GenerateKey() ([]byte, error) {
+ // Generate a 256bit key
+ buf := make([]byte, 2*aes.BlockSize)
+ _, err := rand.Read(buf)
+ return buf, err
+}
+
+// KeyLength is used to sanity check a key
+func (b *AESGCMBarrier) KeyLength() (int, int) {
+ return aes.BlockSize, 2 * aes.BlockSize
+}
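+// With aes.BlockSize == 16 this bounds master keys to 16-32 bytes;
+// GenerateKey above always produces the 32-byte (256-bit) form.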
+
+// Sealed checks if the barrier has been unlocked yet. The Barrier
+// is not expected to be able to perform any CRUD until it is unsealed.
+func (b *AESGCMBarrier) Sealed() (bool, error) {
+ b.l.RLock()
+ defer b.l.RUnlock()
+ return b.sealed, nil
+}
+
+// VerifyMaster is used to check if the given key matches the master key
+func (b *AESGCMBarrier) VerifyMaster(key []byte) error {
+ b.l.RLock()
+ defer b.l.RUnlock()
+ if b.sealed {
+ return ErrBarrierSealed
+ }
+ if subtle.ConstantTimeCompare(key, b.keyring.MasterKey()) != 1 {
+ return ErrBarrierInvalidKey
+ }
+ return nil
+}
+
+// ReloadKeyring is used to re-read the underlying keyring.
+// This is used for HA deployments to ensure the latest keyring
+// is present in the leader.
+func (b *AESGCMBarrier) ReloadKeyring() error {
+ b.l.Lock()
+ defer b.l.Unlock()
+
+ // Create the AES-GCM
+ gcm, err := b.aeadFromKey(b.keyring.MasterKey())
+ if err != nil {
+ return err
+ }
+
+ // Read in the keyring
+ out, err := b.backend.Get(keyringPath)
+ if err != nil {
+ return fmt.Errorf("failed to check for keyring: %v", err)
+ }
+
+ // Ensure that the keyring exists. This should never happen,
+ // and indicates something really bad has happened.
+ if out == nil {
+ return fmt.Errorf("keyring unexpectedly missing")
+ }
+
+ // Decrypt the barrier init key
+ plain, err := b.decrypt(keyringPath, gcm, out.Value)
+ defer memzero(plain)
+ if err != nil {
+ if strings.Contains(err.Error(), "message authentication failed") {
+ return ErrBarrierInvalidKey
+ }
+ return err
+ }
+
+ // Recover the keyring
+ keyring, err := DeserializeKeyring(plain)
+ if err != nil {
+ return fmt.Errorf("keyring deserialization failed: %v", err)
+ }
+
+ // Setup the keyring and finish
+ b.keyring = keyring
+ return nil
+}
+
+// ReloadMasterKey is used to re-read the underlying masterkey.
+// This is used for HA deployments to ensure the latest master key
+// is available for keyring reloading.
+func (b *AESGCMBarrier) ReloadMasterKey() error {
+ // Read the masterKeyPath upgrade
+ out, err := b.Get(masterKeyPath)
+ if err != nil {
+ return fmt.Errorf("failed to read master key path: %v", err)
+ }
+
+ // The masterKeyPath could be missing (backwards incompatible),
+ // we can ignore this and attempt to make progress with the current
+ // master key.
+ if out == nil {
+ return nil
+ }
+
+ defer memzero(out.Value)
+
+ // Deserialize the master key
+ key, err := DeserializeKey(out.Value)
+ if err != nil {
+ return fmt.Errorf("failed to deserialize key: %v", err)
+ }
+
+ b.l.Lock()
+ defer b.l.Unlock()
+
+ // Check if the master key is the same
+ if subtle.ConstantTimeCompare(b.keyring.MasterKey(), key.Value) == 1 {
+ return nil
+ }
+
+ // Update the master key
+ oldKeyring := b.keyring
+ b.keyring = b.keyring.SetMasterKey(key.Value)
+ oldKeyring.Zeroize(false)
+ return nil
+}
+
+// Unseal is used to provide the master key which permits the barrier
+// to be unsealed. If the key is not correct, the barrier remains sealed.
+func (b *AESGCMBarrier) Unseal(key []byte) error {
+ b.l.Lock()
+ defer b.l.Unlock()
+
+ // Do nothing if already unsealed
+ if !b.sealed {
+ return nil
+ }
+
+ // Create the AES-GCM
+ gcm, err := b.aeadFromKey(key)
+ if err != nil {
+ return err
+ }
+
+ // Read in the keyring
+ out, err := b.backend.Get(keyringPath)
+ if err != nil {
+ return fmt.Errorf("failed to check for keyring: %v", err)
+ }
+ if out != nil {
+ // Decrypt the barrier init key
+ plain, err := b.decrypt(keyringPath, gcm, out.Value)
+ defer memzero(plain)
+ if err != nil {
+ if strings.Contains(err.Error(), "message authentication failed") {
+ return ErrBarrierInvalidKey
+ }
+ return err
+ }
+
+ // Recover the keyring
+ keyring, err := DeserializeKeyring(plain)
+ if err != nil {
+ return fmt.Errorf("keyring deserialization failed: %v", err)
+ }
+
+ // Setup the keyring and finish
+ b.keyring = keyring
+ b.sealed = false
+ return nil
+ }
+
+ // Read the barrier initialization key
+ out, err = b.backend.Get(barrierInitPath)
+ if err != nil {
+ return fmt.Errorf("failed to check for initialization: %v", err)
+ }
+ if out == nil {
+ return ErrBarrierNotInit
+ }
+
+ // Decrypt the barrier init key
+ plain, err := b.decrypt(barrierInitPath, gcm, out.Value)
+ if err != nil {
+ if strings.Contains(err.Error(), "message authentication failed") {
+ return ErrBarrierInvalidKey
+ }
+ return err
+ }
+ defer memzero(plain)
+
+ // Unmarshal the barrier init
+ var init barrierInit
+ if err := jsonutil.DecodeJSON(plain, &init); err != nil {
+ return fmt.Errorf("failed to unmarshal barrier init file")
+ }
+
+ // Setup a new keyring, this is for backwards compatibility
+ keyringNew := NewKeyring()
+ keyring := keyringNew.SetMasterKey(key)
+
+ // AddKey reuses the master key, so we only zeroize after this call
+ defer keyringNew.Zeroize(false)
+
+ keyring, err = keyring.AddKey(&Key{
+ Term: 1,
+ Version: 1,
+ Value: init.Key,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to create keyring: %v", err)
+ }
+ if err := b.persistKeyring(keyring); err != nil {
+ return err
+ }
+
+ // Delete the old barrier entry
+ if err := b.backend.Delete(barrierInitPath); err != nil {
+ return fmt.Errorf("failed to delete barrier init file: %v", err)
+ }
+
+ // Set the vault as unsealed
+ b.keyring = keyring
+ b.sealed = false
+ return nil
+}
+
+// Seal is used to re-seal the barrier. This requires the barrier to
+// be unsealed again to perform any further operations.
+func (b *AESGCMBarrier) Seal() error {
+ b.l.Lock()
+ defer b.l.Unlock()
+
+ // Remove the primary key, and seal the vault
+ b.cache = make(map[uint32]cipher.AEAD)
+ b.keyring.Zeroize(true)
+ b.keyring = nil
+ b.sealed = true
+ return nil
+}
+
+// Rotate is used to create a new encryption key. All future writes
+// should use the new key, while old values should still be decryptable.
+func (b *AESGCMBarrier) Rotate() (uint32, error) {
+ b.l.Lock()
+ defer b.l.Unlock()
+ if b.sealed {
+ return 0, ErrBarrierSealed
+ }
+
+ // Generate a new key
+ encrypt, err := b.GenerateKey()
+ if err != nil {
+ return 0, fmt.Errorf("failed to generate encryption key: %v", err)
+ }
+
+ // Get the next term
+ term := b.keyring.ActiveTerm()
+ newTerm := term + 1
+
+ // Add a new encryption key
+ newKeyring, err := b.keyring.AddKey(&Key{
+ Term: newTerm,
+ Version: 1,
+ Value: encrypt,
+ })
+ if err != nil {
+ return 0, fmt.Errorf("failed to add new encryption key: %v", err)
+ }
+
+ // Persist the new keyring
+ if err := b.persistKeyring(newKeyring); err != nil {
+ return 0, err
+ }
+
+ // Swap the keyrings
+ b.keyring = newKeyring
+ return newTerm, nil
+}
+
+// CreateUpgrade creates an upgrade path key to the given term from the previous term
+func (b *AESGCMBarrier) CreateUpgrade(term uint32) error {
+ b.l.RLock()
+ defer b.l.RUnlock()
+ if b.sealed {
+ return ErrBarrierSealed
+ }
+
+ // Get the key for this term
+ termKey := b.keyring.TermKey(term)
+ buf, err := termKey.Serialize()
+ defer memzero(buf)
+ if err != nil {
+ return err
+ }
+
+ // Get the AEAD for the previous term
+ prevTerm := term - 1
+ primary, err := b.aeadForTerm(prevTerm)
+ if err != nil {
+ return err
+ }
+
+ key := fmt.Sprintf("%s%d", keyringUpgradePrefix, prevTerm)
+ value := b.encrypt(key, prevTerm, primary, buf)
+ // Create upgrade key
+ pe := &physical.Entry{
+ Key: key,
+ Value: value,
+ }
+ return b.backend.Put(pe)
+}
+
+// DestroyUpgrade destroys the upgrade path key to the given term
+func (b *AESGCMBarrier) DestroyUpgrade(term uint32) error {
+ path := fmt.Sprintf("%s%d", keyringUpgradePrefix, term-1)
+ return b.Delete(path)
+}
+
+// CheckUpgrade looks for an upgrade to the current term and installs it
+func (b *AESGCMBarrier) CheckUpgrade() (bool, uint32, error) {
+ b.l.RLock()
+ defer b.l.RUnlock()
+ if b.sealed {
+ return false, 0, ErrBarrierSealed
+ }
+
+ // Get the current term
+ activeTerm := b.keyring.ActiveTerm()
+
+ // Check for an upgrade key
+ upgrade := fmt.Sprintf("%s%d", keyringUpgradePrefix, activeTerm)
+ entry, err := b.Get(upgrade)
+ if err != nil {
+ return false, 0, err
+ }
+
+ // Nothing to do if no upgrade
+ if entry == nil {
+ return false, 0, nil
+ }
+
+ defer memzero(entry.Value)
+
+ // Deserialize the key
+ key, err := DeserializeKey(entry.Value)
+ if err != nil {
+ return false, 0, err
+ }
+
+ // Upgrade from read lock to write lock
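+ // Deferred calls run LIFO: on return, Unlock releases the write lock
+ // first and RLock then reacquires the read lock, balancing the RUnlock
+ // deferred at the top of this function.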
+ b.l.RUnlock()
+ defer b.l.RLock()
+ b.l.Lock()
+ defer b.l.Unlock()
+
+ // Update the keyring
+ newKeyring, err := b.keyring.AddKey(key)
+ if err != nil {
+ return false, 0, fmt.Errorf("failed to add new encryption key: %v", err)
+ }
+ b.keyring = newKeyring
+
+ // Done!
+ return true, key.Term, nil
+}
+
+// ActiveKeyInfo is used to inform details about the active key
+func (b *AESGCMBarrier) ActiveKeyInfo() (*KeyInfo, error) {
+ b.l.RLock()
+ defer b.l.RUnlock()
+ if b.sealed {
+ return nil, ErrBarrierSealed
+ }
+
+ // Determine the key install time
+ term := b.keyring.ActiveTerm()
+ key := b.keyring.TermKey(term)
+
+ // Return the key info
+ info := &KeyInfo{
+ Term: int(term),
+ InstallTime: key.InstallTime,
+ }
+ return info, nil
+}
+
+// Rekey is used to change the master key used to protect the keyring
+func (b *AESGCMBarrier) Rekey(key []byte) error {
+ b.l.Lock()
+ defer b.l.Unlock()
+
+ newKeyring, err := b.updateMasterKeyCommon(key)
+ if err != nil {
+ return err
+ }
+
+ // Persist the new keyring
+ if err := b.persistKeyring(newKeyring); err != nil {
+ return err
+ }
+
+ // Swap the keyrings
+ oldKeyring := b.keyring
+ b.keyring = newKeyring
+ oldKeyring.Zeroize(false)
+ return nil
+}
+
+// SetMasterKey updates the keyring's in-memory master key but does not persist
+// anything to storage
+func (b *AESGCMBarrier) SetMasterKey(key []byte) error {
+ b.l.Lock()
+ defer b.l.Unlock()
+
+ newKeyring, err := b.updateMasterKeyCommon(key)
+ if err != nil {
+ return err
+ }
+
+ // Swap the keyrings
+ oldKeyring := b.keyring
+ b.keyring = newKeyring
+ oldKeyring.Zeroize(false)
+ return nil
+}
+
+// Performs common tasks related to updating the master key; note that the lock
+// must be held before calling this function
+func (b *AESGCMBarrier) updateMasterKeyCommon(key []byte) (*Keyring, error) {
+ if b.sealed {
+ return nil, ErrBarrierSealed
+ }
+
+ // Verify the key size
+ min, max := b.KeyLength()
+ if len(key) < min || len(key) > max {
+ return nil, fmt.Errorf("Key size must be %d or %d", min, max)
+ }
+
+ return b.keyring.SetMasterKey(key), nil
+}
+
+// Put is used to insert or update an entry
+func (b *AESGCMBarrier) Put(entry *Entry) error {
+ defer metrics.MeasureSince([]string{"barrier", "put"}, time.Now())
+ b.l.RLock()
+ defer b.l.RUnlock()
+ if b.sealed {
+ return ErrBarrierSealed
+ }
+
+ term := b.keyring.ActiveTerm()
+ primary, err := b.aeadForTerm(term)
+ if err != nil {
+ return err
+ }
+
+ pe := &physical.Entry{
+ Key: entry.Key,
+ Value: b.encrypt(entry.Key, term, primary, entry.Value),
+ }
+ return b.backend.Put(pe)
+}
+
+// Get is used to fetch an entry
+func (b *AESGCMBarrier) Get(key string) (*Entry, error) {
+ defer metrics.MeasureSince([]string{"barrier", "get"}, time.Now())
+ b.l.RLock()
+ defer b.l.RUnlock()
+ if b.sealed {
+ return nil, ErrBarrierSealed
+ }
+
+ // Read the key from the backend
+ pe, err := b.backend.Get(key)
+ if err != nil {
+ return nil, err
+ } else if pe == nil {
+ return nil, nil
+ }
+
+ // Decrypt the ciphertext
+ plain, err := b.decryptKeyring(key, pe.Value)
+ if err != nil {
+ return nil, fmt.Errorf("decryption failed: %v", err)
+ }
+
+ // Wrap in a logical entry
+ entry := &Entry{
+ Key: key,
+ Value: plain,
+ }
+ return entry, nil
+}
+
+// Delete is used to permanently delete an entry
+func (b *AESGCMBarrier) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"barrier", "delete"}, time.Now())
+ b.l.RLock()
+ defer b.l.RUnlock()
+ if b.sealed {
+ return ErrBarrierSealed
+ }
+
+ return b.backend.Delete(key)
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (b *AESGCMBarrier) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"barrier", "list"}, time.Now())
+ b.l.RLock()
+ defer b.l.RUnlock()
+ if b.sealed {
+ return nil, ErrBarrierSealed
+ }
+
+ return b.backend.List(prefix)
+}
+
+// aeadForTerm returns the AES-GCM AEAD for the given term
+func (b *AESGCMBarrier) aeadForTerm(term uint32) (cipher.AEAD, error) {
+ // Check for the keyring
+ keyring := b.keyring
+ if keyring == nil {
+ return nil, nil
+ }
+
+ // Check the cache for the aead
+ b.cacheLock.RLock()
+ aead, ok := b.cache[term]
+ b.cacheLock.RUnlock()
+ if ok {
+ return aead, nil
+ }
+
+ // Read the underlying key
+ key := keyring.TermKey(term)
+ if key == nil {
+ return nil, nil
+ }
+
+ // Create a new aead
+ aead, err := b.aeadFromKey(key.Value)
+ if err != nil {
+ return nil, err
+ }
+
+ // Update the cache
+ b.cacheLock.Lock()
+ b.cache[term] = aead
+ b.cacheLock.Unlock()
+ return aead, nil
+}
+
+// aeadFromKey returns an AES-GCM AEAD using the given key.
+func (b *AESGCMBarrier) aeadFromKey(key []byte) (cipher.AEAD, error) {
+ // Create the AES cipher
+ aesCipher, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create cipher: %v", err)
+ }
+
+ // Create the GCM mode AEAD
+ gcm, err := cipher.NewGCM(aesCipher)
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize GCM mode")
+ }
+ return gcm, nil
+}
+
+// encrypt is used to encrypt a value
+func (b *AESGCMBarrier) encrypt(path string, term uint32, gcm cipher.AEAD, plain []byte) []byte {
+ // Allocate the output buffer with room for term, version byte,
+ // nonce, GCM tag and the plaintext
+ capacity := termSize + 1 + gcm.NonceSize() + gcm.Overhead() + len(plain)
+ size := termSize + 1 + gcm.NonceSize()
+ out := make([]byte, size, capacity)
+
+ // Set the key term
+ binary.BigEndian.PutUint32(out[:4], term)
+
+ // Set the version byte
+ out[4] = b.currentAESGCMVersionByte
+
+ // Generate a random nonce
+ nonce := out[5 : 5+gcm.NonceSize()]
+ rand.Read(nonce)
+
+ // Seal the output
+ switch b.currentAESGCMVersionByte {
+ case AESGCMVersion1:
+ out = gcm.Seal(out, nonce, plain, nil)
+ case AESGCMVersion2:
+ out = gcm.Seal(out, nonce, plain, []byte(path))
+ default:
+ panic("Unknown AESGCM version")
+ }
+
+ return out
+}
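+
+// A sealed value produced by encrypt is laid out as follows (termSize is 4;
+// the offset 5 used in encrypt and decrypt is termSize plus the version
+// byte):
+//
+//	bytes [0:4]    key term, big-endian uint32
+//	byte  [4]      version byte (AESGCMVersion1 or AESGCMVersion2)
+//	bytes [5:5+n]  random nonce, n = gcm.NonceSize()
+//	bytes [5+n:]   GCM ciphertext followed by the authentication tag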
+
+// decrypt is used to decrypt a value
+func (b *AESGCMBarrier) decrypt(path string, gcm cipher.AEAD, cipher []byte) ([]byte, error) {
+ // Verify the term is the initial key term; this helper only handles
+ // values sealed under term 1
+ term := binary.BigEndian.Uint32(cipher[:4])
+ if term != initialKeyTerm {
+ return nil, fmt.Errorf("term mis-match")
+ }
+
+ // Capture the parts
+ nonce := cipher[5 : 5+gcm.NonceSize()]
+ raw := cipher[5+gcm.NonceSize():]
+ out := make([]byte, 0, len(raw)-gcm.NonceSize())
+
+ // Verify the cipher byte and attempt to open
+ switch cipher[4] {
+ case AESGCMVersion1:
+ return gcm.Open(out, nonce, raw, nil)
+ case AESGCMVersion2:
+ return gcm.Open(out, nonce, raw, []byte(path))
+ default:
+ return nil, fmt.Errorf("version bytes mis-match")
+ }
+}
+
+// decryptKeyring is used to decrypt a value using the keyring
+func (b *AESGCMBarrier) decryptKeyring(path string, cipher []byte) ([]byte, error) {
+ // Verify the term
+ term := binary.BigEndian.Uint32(cipher[:4])
+
+ // Get the GCM by term
+ // Doing this lookup first has some cost, but a term that fails to
+ // match is not the normal case
+ gcm, err := b.aeadForTerm(term)
+ if err != nil {
+ return nil, err
+ }
+ if gcm == nil {
+ return nil, fmt.Errorf("no decryption key available for term %d", term)
+ }
+
+ nonce := cipher[5 : 5+gcm.NonceSize()]
+ raw := cipher[5+gcm.NonceSize():]
+ out := make([]byte, 0, len(raw)-gcm.NonceSize())
+
+ // Attempt to open
+ switch cipher[4] {
+ case AESGCMVersion1:
+ return gcm.Open(out, nonce, raw, nil)
+ case AESGCMVersion2:
+ return gcm.Open(out, nonce, raw, []byte(path))
+ default:
+ return nil, fmt.Errorf("version bytes mis-match")
+ }
+}
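+
+// Note on the version byte: under AESGCMVersion2 the entry's path is bound
+// into the ciphertext as GCM additional authenticated data, so a value
+// copied to a different key fails to decrypt; under AESGCMVersion1 no such
+// binding exists and a moved value still opens (see the MoveIntegrity
+// tests).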
+
+// Encrypt is used to encrypt in-memory for the BarrierEncryptor interface
+func (b *AESGCMBarrier) Encrypt(key string, plaintext []byte) ([]byte, error) {
+ b.l.RLock()
+ defer b.l.RUnlock()
+ if b.sealed {
+ return nil, ErrBarrierSealed
+ }
+
+ term := b.keyring.ActiveTerm()
+ primary, err := b.aeadForTerm(term)
+ if err != nil {
+ return nil, err
+ }
+
+ ciphertext := b.encrypt(key, term, primary, plaintext)
+ return ciphertext, nil
+}
+
+// Decrypt is used to decrypt in-memory for the BarrierEncryptor interface
+func (b *AESGCMBarrier) Decrypt(key string, ciphertext []byte) ([]byte, error) {
+ b.l.RLock()
+ defer b.l.RUnlock()
+ if b.sealed {
+ return nil, ErrBarrierSealed
+ }
+
+ // Decrypt the ciphertext
+ plain, err := b.decryptKeyring(key, ciphertext)
+ if err != nil {
+ return nil, fmt.Errorf("decryption failed: %v", err)
+ }
+ return plain, nil
+}
+
+func (b *AESGCMBarrier) Keyring() (*Keyring, error) {
+ b.l.RLock()
+ defer b.l.RUnlock()
+ if b.sealed {
+ return nil, ErrBarrierSealed
+ }
+
+ return b.keyring.Clone(), nil
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go
new file mode 100644
index 0000000..7d575ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go
@@ -0,0 +1,462 @@
+package vault
+
+import (
+ "bytes"
+ "encoding/json"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
+ log "github.com/mgutz/logxi/v1"
+)
+
+var (
+ logger = logformat.NewVaultLogger(log.LevelTrace)
+)
+
+// mockBarrier returns a physical backend, security barrier, and master key
+func mockBarrier(t testing.TB) (physical.Backend, SecurityBarrier, []byte) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Initialize and unseal
+ key, _ := b.GenerateKey()
+ b.Initialize(key)
+ b.Unseal(key)
+ return inm, b, key
+}
+
+func TestAESGCMBarrier_Basic(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ testBarrier(t, b)
+}
+
+func TestAESGCMBarrier_Rotate(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ testBarrier_Rotate(t, b)
+}
+
+func TestAESGCMBarrier_Upgrade(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b1, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ b2, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ testBarrier_Upgrade(t, b1, b2)
+}
+
+func TestAESGCMBarrier_Upgrade_Rekey(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b1, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ b2, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ testBarrier_Upgrade_Rekey(t, b1, b2)
+}
+
+func TestAESGCMBarrier_Rekey(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ testBarrier_Rekey(t, b)
+}
+
+// Test an upgrade from the old (0.1) barrier/init to the new
+// core/keyring style
+func TestAESGCMBarrier_BackwardsCompatible(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Generate a barrier/init entry
+ encrypt, _ := b.GenerateKey()
+ init := &barrierInit{
+ Version: 1,
+ Key: encrypt,
+ }
+ buf, _ := json.Marshal(init)
+
+ // Protect with master key
+ master, _ := b.GenerateKey()
+ gcm, _ := b.aeadFromKey(master)
+ value := b.encrypt(barrierInitPath, initialKeyTerm, gcm, buf)
+
+ // Write to the physical backend
+ pe := &physical.Entry{
+ Key: barrierInitPath,
+ Value: value,
+ }
+ inm.Put(pe)
+
+ // Create a fake key
+ gcm, _ = b.aeadFromKey(encrypt)
+ pe = &physical.Entry{
+ Key: "test/foo",
+ Value: b.encrypt("test/foo", initialKeyTerm, gcm, []byte("test")),
+ }
+ inm.Put(pe)
+
+ // Should still be initialized
+ isInit, err := b.Initialized()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !isInit {
+ t.Fatalf("should be initialized")
+ }
+
+ // Unseal should work and migrate online
+ err = b.Unseal(master)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check for migration
+ out, err := inm.Get(barrierInitPath)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("should delete old barrier init")
+ }
+
+ // Should have keyring
+ out, err = inm.Get(keyringPath)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("should have keyring file")
+ }
+
+ // Attempt to read encrypted key
+ entry, err := b.Get("test/foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if string(entry.Value) != "test" {
+ t.Fatalf("bad: %#v", entry)
+ }
+}
+
+// Verify data sent through is encrypted
+func TestAESGCMBarrier_Confidential(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Initialize and unseal
+ key, _ := b.GenerateKey()
+ b.Initialize(key)
+ b.Unseal(key)
+
+ // Put a logical entry
+ entry := &Entry{Key: "test", Value: []byte("test")}
+ err = b.Put(entry)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check the physical entry
+ pe, err := inm.Get("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pe == nil {
+ t.Fatalf("missing physical entry")
+ }
+
+ if pe.Key != "test" {
+ t.Fatalf("bad: %#v", pe)
+ }
+ if bytes.Equal(pe.Value, entry.Value) {
+ t.Fatalf("bad: %#v", pe)
+ }
+}
+
+// Verify data sent through cannot be tampered with
+func TestAESGCMBarrier_Integrity(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Initialize and unseal
+ key, _ := b.GenerateKey()
+ b.Initialize(key)
+ b.Unseal(key)
+
+ // Put a logical entry
+ entry := &Entry{Key: "test", Value: []byte("test")}
+ err = b.Put(entry)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Change a byte in the underlying physical entry
+ pe, _ := inm.Get("test")
+ pe.Value[15]++
+ err = inm.Put(pe)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Read from the barrier
+ _, err = b.Get("test")
+ if err == nil {
+ t.Fatalf("should fail!")
+ }
+}
+
+// Verify data sent through cannot be moved
+func TestAESGCMBarrier_MoveIntegrityV1(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ b.currentAESGCMVersionByte = AESGCMVersion1
+
+ // Initialize and unseal
+ key, _ := b.GenerateKey()
+ err = b.Initialize(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ err = b.Unseal(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Put a logical entry
+ entry := &Entry{Key: "test", Value: []byte("test")}
+ err = b.Put(entry)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Change the location of the underlying physical entry
+ pe, _ := inm.Get("test")
+ pe.Key = "moved"
+ err = inm.Put(pe)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Read from the barrier
+ _, err = b.Get("moved")
+ if err != nil {
+ t.Fatalf("should succeed with version 1!")
+ }
+}
+
+func TestAESGCMBarrier_MoveIntegrityV2(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ b.currentAESGCMVersionByte = AESGCMVersion2
+
+ // Initialize and unseal
+ key, _ := b.GenerateKey()
+ err = b.Initialize(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ err = b.Unseal(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Put a logical entry
+ entry := &Entry{Key: "test", Value: []byte("test")}
+ err = b.Put(entry)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Change the location of the underlying physical entry
+ pe, _ := inm.Get("test")
+ pe.Key = "moved"
+ err = inm.Put(pe)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Read from the barrier
+ _, err = b.Get("moved")
+ if err == nil {
+ t.Fatalf("should fail with version 2!")
+ }
+}
+
+func TestAESGCMBarrier_UpgradeV1toV2(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ b.currentAESGCMVersionByte = AESGCMVersion1
+
+ // Initialize and unseal
+ key, _ := b.GenerateKey()
+ err = b.Initialize(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ err = b.Unseal(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Put a logical entry
+ entry := &Entry{Key: "test", Value: []byte("test")}
+ err = b.Put(entry)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Seal
+ err = b.Seal()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Open again as version 2
+ b, err = NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ b.currentAESGCMVersionByte = AESGCMVersion2
+
+ // Unseal
+ err = b.Unseal(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check successful decryption
+ _, err = b.Get("test")
+ if err != nil {
+ t.Fatalf("Upgrade unsuccessful")
+ }
+}
+
+func TestEncrypt_Unique(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ key, _ := b.GenerateKey()
+ b.Initialize(key)
+ b.Unseal(key)
+
+ if b.keyring == nil {
+ t.Fatalf("barrier is sealed")
+ }
+
+ entry := &Entry{Key: "test", Value: []byte("test")}
+ term := b.keyring.ActiveTerm()
+ primary, _ := b.aeadForTerm(term)
+
+ first := b.encrypt("test", term, primary, entry.Value)
+ second := b.encrypt("test", term, primary, entry.Value)
+
+ if bytes.Equal(first, second) == true {
+ t.Fatalf("improper random seeding detected")
+ }
+}
+
+func TestInitialize_KeyLength(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ long := []byte("ThisKeyDoesNotHaveTheRightLength!")
+ middle := []byte("ThisIsASecretKeyAndMore")
+ short := []byte("Key")
+
+ err = b.Initialize(long)
+
+ if err == nil {
+ t.Fatalf("key length protection failed")
+ }
+
+ err = b.Initialize(middle)
+
+ if err == nil {
+ t.Fatalf("key length protection failed")
+ }
+
+ err = b.Initialize(short)
+
+ if err == nil {
+ t.Fatalf("key length protection failed")
+ }
+}
+
+func TestEncrypt_BarrierEncryptor(t *testing.T) {
+ inm := physical.NewInmem(logger)
+ b, err := NewAESGCMBarrier(inm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Initialize and unseal
+ key, _ := b.GenerateKey()
+ b.Initialize(key)
+ b.Unseal(key)
+
+ cipher, err := b.Encrypt("foo", []byte("quick brown fox"))
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ plain, err := b.Decrypt("foo", cipher)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if string(plain) != "quick brown fox" {
+ t.Fatalf("bad: %s", plain)
+ }
+}
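+
+// Illustrative sketch, not part of the upstream test suite: the minimal
+// happy path through the barrier, using the mockBarrier helper above.
+func TestAESGCMBarrier_PutGetSketch(t *testing.T) {
+ _, b, _ := mockBarrier(t)
+
+ // Writes are transparently encrypted before reaching the backend
+ entry := &Entry{Key: "sketch", Value: []byte("value")}
+ if err := b.Put(entry); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Reads transparently decrypt and return the logical entry
+ out, err := b.Get("sketch")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if string(out.Value) != "value" {
+ t.Fatalf("bad: %#v", out)
+ }
+}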
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_test.go b/vendor/github.com/hashicorp/vault/vault/barrier_test.go
new file mode 100644
index 0000000..e40c011
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/barrier_test.go
@@ -0,0 +1,531 @@
+package vault
+
+import (
+ "reflect"
+ "testing"
+ "time"
+)
+
+func testBarrier(t *testing.T, b SecurityBarrier) {
+ // Should not be initialized
+ init, err := b.Initialized()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if init {
+ t.Fatalf("should not be initialized")
+ }
+
+ // Should start sealed
+ sealed, err := b.Sealed()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !sealed {
+ t.Fatalf("should be sealed")
+ }
+
+ // Sealing should be a no-op
+ if err := b.Seal(); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // All operations should fail
+ e := &Entry{Key: "test", Value: []byte("test")}
+ if err := b.Put(e); err != ErrBarrierSealed {
+ t.Fatalf("err: %v", err)
+ }
+ if _, err := b.Get("test"); err != ErrBarrierSealed {
+ t.Fatalf("err: %v", err)
+ }
+ if err := b.Delete("test"); err != ErrBarrierSealed {
+ t.Fatalf("err: %v", err)
+ }
+ if _, err := b.List(""); err != ErrBarrierSealed {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get a new key
+ key, err := b.GenerateKey()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Validate minimum key length
+ min, max := b.KeyLength()
+ if min < 16 {
+ t.Fatalf("minimum key size too small: %d", min)
+ }
+ if max < min {
+ t.Fatalf("maximum key size smaller than min")
+ }
+
+ // Unseal should not work
+ if err := b.Unseal(key); err != ErrBarrierNotInit {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Initialize the vault
+ if err := b.Initialize(key); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Double Initialize should fail
+ if err := b.Initialize(key); err != ErrBarrierAlreadyInit {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be initialized
+ init, err = b.Initialized()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !init {
+ t.Fatalf("should be initialized")
+ }
+
+ // Should still be sealed
+ sealed, err = b.Sealed()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !sealed {
+ t.Fatalf("should sealed")
+ }
+
+ // Unseal should work
+ if err := b.Unseal(key); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Unseal should no-op when done twice
+ if err := b.Unseal(key); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should no longer be sealed
+ sealed, err = b.Sealed()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if sealed {
+ t.Fatalf("should be unsealed")
+ }
+
+ // Verify the master key
+ if err := b.VerifyMaster(key); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Operations should work
+ out, err := b.Get("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // List should have only "core/"
+ keys, err := b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 1 || keys[0] != "core/" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Try to write
+ if err := b.Put(e); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be equal
+ out, err = b.Get("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, e) {
+ t.Fatalf("bad: %v exp: %v", out, e)
+ }
+
+ // List should show the items
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 2 {
+ t.Fatalf("bad: %v", keys)
+ }
+ if keys[0] != "core/" || keys[1] != "test" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Delete should clear
+ err = b.Delete("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Double Delete is fine
+ err = b.Delete("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be nil
+ out, err = b.Get("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // List should have nothing
+ keys, err = b.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 1 || keys[0] != "core/" {
+ t.Fatalf("bad: %v", keys)
+ }
+
+ // Add the item back
+ if err := b.Put(e); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Reseal should prevent any updates
+ if err := b.Seal(); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // No access allowed
+ if _, err := b.Get("test"); err != ErrBarrierSealed {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Unseal should work
+ if err := b.Unseal(key); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be equal
+ out, err = b.Get("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, e) {
+ t.Fatalf("bad: %v exp: %v", out, e)
+ }
+
+ // Final cleanup
+ err = b.Delete("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Reseal should prevent any updates
+ if err := b.Seal(); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Modify the key
+ key[0]++
+
+ // Unseal should fail
+ if err := b.Unseal(key); err != ErrBarrierInvalidKey {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func testBarrier_Rotate(t *testing.T, b SecurityBarrier) {
+ // Initialize the barrier
+ key, _ := b.GenerateKey()
+ b.Initialize(key)
+ err := b.Unseal(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check the key info
+ info, err := b.ActiveKeyInfo()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if info.Term != 1 {
+ t.Fatalf("Bad term: %d", info.Term)
+ }
+ if time.Since(info.InstallTime) > time.Second {
+ t.Fatalf("Bad install: %v", info.InstallTime)
+ }
+ first := info.InstallTime
+
+ // Write a key
+ e1 := &Entry{Key: "test", Value: []byte("test")}
+ if err := b.Put(e1); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Rotate the encryption key
+ newTerm, err := b.Rotate()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if newTerm != 2 {
+ t.Fatalf("bad: %v", newTerm)
+ }
+
+ // Check the key info
+ info, err = b.ActiveKeyInfo()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if info.Term != 2 {
+ t.Fatalf("Bad term: %d", info.Term)
+ }
+ if !info.InstallTime.After(first) {
+ t.Fatalf("Bad install: %v", info.InstallTime)
+ }
+
+ // Write another key
+ e2 := &Entry{Key: "foo", Value: []byte("test")}
+ if err := b.Put(e2); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Reading both should work
+ out, err := b.Get(e1.Key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ out, err = b.Get(e2.Key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Seal and unseal
+ err = b.Seal()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ err = b.Unseal(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Reading both should work
+ out, err = b.Get(e1.Key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ out, err = b.Get(e2.Key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Should be fine to reload keyring
+ err = b.ReloadKeyring()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func testBarrier_Rekey(t *testing.T, b SecurityBarrier) {
+ // Initialize the barrier
+ key, _ := b.GenerateKey()
+ b.Initialize(key)
+ err := b.Unseal(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Write a key
+ e1 := &Entry{Key: "test", Value: []byte("test")}
+ if err := b.Put(e1); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Verify the master key
+ if err := b.VerifyMaster(key); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Rekey to a new key
+ newKey, _ := b.GenerateKey()
+ err = b.Rekey(newKey)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Verify the old master key
+ if err := b.VerifyMaster(key); err != ErrBarrierInvalidKey {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Verify the new master key
+ if err := b.VerifyMaster(newKey); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Reading should work
+ out, err := b.Get(e1.Key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Seal
+ err = b.Seal()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Unseal with old key should fail
+ err = b.Unseal(key)
+ if err == nil {
+ t.Fatalf("unseal should fail")
+ }
+
+ // Unseal with new keys should work
+ err = b.Unseal(newKey)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Reading should work
+ out, err = b.Get(e1.Key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Should be fine to reload keyring
+ err = b.ReloadKeyring()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func testBarrier_Upgrade(t *testing.T, b1, b2 SecurityBarrier) {
+ // Initialize the barrier
+ key, _ := b1.GenerateKey()
+ b1.Initialize(key)
+ err := b1.Unseal(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ err = b2.Unseal(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Rotate the encryption key
+ newTerm, err := b1.Rotate()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Create upgrade path
+ err = b1.CreateUpgrade(newTerm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check for an upgrade
+ did, updated, err := b2.CheckUpgrade()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !did || updated != newTerm {
+ t.Fatalf("failed to upgrade")
+ }
+
+ // Should have no upgrades pending
+ did, updated, err = b2.CheckUpgrade()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if did {
+ t.Fatalf("should not have upgrade")
+ }
+
+ // Rotate the encryption key
+ newTerm, err = b1.Rotate()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Create upgrade path
+ err = b1.CreateUpgrade(newTerm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Destroy upgrade path
+ err = b1.DestroyUpgrade(newTerm)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should have no upgrades pending
+ did, updated, err = b2.CheckUpgrade()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if did {
+ t.Fatalf("should not have upgrade")
+ }
+}
+
+func testBarrier_Upgrade_Rekey(t *testing.T, b1, b2 SecurityBarrier) {
+ // Initialize the barrier
+ key, _ := b1.GenerateKey()
+ b1.Initialize(key)
+ err := b1.Unseal(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ err = b2.Unseal(key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Rekey to a new key
+ newKey, _ := b1.GenerateKey()
+ err = b1.Rekey(newKey)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Reload the master key
+ err = b2.ReloadMasterKey()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Reload the keyring
+ err = b2.ReloadKeyring()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_view.go b/vendor/github.com/hashicorp/vault/vault/barrier_view.go
new file mode 100644
index 0000000..0fa6f2d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/barrier_view.go
@@ -0,0 +1,119 @@
+package vault
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+// BarrierView wraps a SecurityBarrier and ensures all access is automatically
+// prefixed. This is used to prevent anyone with access to the view to access
+// any data in the durable storage outside of their prefix. Conceptually this
+// is like a "chroot" into the barrier.
+//
+// BarrierView implements logical.Storage so it can be passed in as the
+// durable storage mechanism for logical views.
+type BarrierView struct {
+ barrier BarrierStorage
+ prefix string
+ readonly bool
+}
+
+// NewBarrierView takes an underlying security barrier and returns
+// a view of it that can only operate with the given prefix.
+func NewBarrierView(barrier BarrierStorage, prefix string) *BarrierView {
+ return &BarrierView{
+ barrier: barrier,
+ prefix: prefix,
+ }
+}
+
+// sanityCheck is used to perform a sanity check on a key
+func (v *BarrierView) sanityCheck(key string) error {
+ if strings.Contains(key, "..") {
+ return fmt.Errorf("key cannot be relative path")
+ }
+ return nil
+}
+
+// logical.Storage impl.
+func (v *BarrierView) List(prefix string) ([]string, error) {
+ if err := v.sanityCheck(prefix); err != nil {
+ return nil, err
+ }
+ return v.barrier.List(v.expandKey(prefix))
+}
+
+// logical.Storage impl.
+func (v *BarrierView) Get(key string) (*logical.StorageEntry, error) {
+ if err := v.sanityCheck(key); err != nil {
+ return nil, err
+ }
+ entry, err := v.barrier.Get(v.expandKey(key))
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+ entry.Key = v.truncateKey(entry.Key)
+
+ return &logical.StorageEntry{
+ Key: entry.Key,
+ Value: entry.Value,
+ }, nil
+}
+
+// logical.Storage impl.
+func (v *BarrierView) Put(entry *logical.StorageEntry) error {
+ if err := v.sanityCheck(entry.Key); err != nil {
+ return err
+ }
+
+ expandedKey := v.expandKey(entry.Key)
+
+ if v.readonly {
+ return logical.ErrReadOnly
+ }
+
+ nested := &Entry{
+ Key: expandedKey,
+ Value: entry.Value,
+ }
+ return v.barrier.Put(nested)
+}
+
+// logical.Storage impl.
+func (v *BarrierView) Delete(key string) error {
+ if err := v.sanityCheck(key); err != nil {
+ return err
+ }
+
+ expandedKey := v.expandKey(key)
+
+ if v.readonly {
+ return logical.ErrReadOnly
+ }
+
+ return v.barrier.Delete(expandedKey)
+}
+
+// SubView constructs a nested sub-view using the given prefix
+func (v *BarrierView) SubView(prefix string) *BarrierView {
+ sub := v.expandKey(prefix)
+ return &BarrierView{barrier: v.barrier, prefix: sub, readonly: v.readonly}
+}
+
+// expandKey is used to expand to the full key path with the prefix
+func (v *BarrierView) expandKey(suffix string) string {
+ return v.prefix + suffix
+}
+
+// truncateKey is used to remove the prefix of the key
+func (v *BarrierView) truncateKey(full string) string {
+ return strings.TrimPrefix(full, v.prefix)
+}
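+
+// Illustrative sketch, not part of the upstream source: prefixes compose
+// through SubView, so keys written via a nested view land under the
+// concatenated prefix and come back with it stripped:
+//
+//	root := NewBarrierView(barrier, "foo/")
+//	sub := root.SubView("bar/") // effective prefix "foo/bar/"
+//	sub.Put(&logical.StorageEntry{Key: "baz", Value: []byte("v")})
+//	// the barrier now stores an encrypted entry at "foo/bar/baz";
+//	// sub.Get("baz") returns it with Key == "baz"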
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_view_test.go b/vendor/github.com/hashicorp/vault/vault/barrier_view_test.go
new file mode 100644
index 0000000..59a0c38
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/barrier_view_test.go
@@ -0,0 +1,316 @@
+package vault
+
+import (
+ "reflect"
+ "sort"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestBarrierView_impl(t *testing.T) {
+ var _ logical.Storage = new(BarrierView)
+}
+
+func TestBarrierView_spec(t *testing.T) {
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "foo/")
+ logical.TestStorage(t, view)
+}
+
+func TestBarrierView_BadKeysKeys(t *testing.T) {
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "foo/")
+
+ _, err := view.List("../")
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ _, err = view.Get("../")
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ err = view.Delete("../foo")
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ le := &logical.StorageEntry{
+ Key: "../foo",
+ Value: []byte("test"),
+ }
+ err = view.Put(le)
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+}
+
+func TestBarrierView(t *testing.T) {
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "foo/")
+
+ // Write a key outside of foo/
+ entry := &Entry{Key: "test", Value: []byte("test")}
+ if err := barrier.Put(entry); err != nil {
+ t.Fatalf("bad: %v", err)
+ }
+
+ // List should have no visibility
+ keys, err := view.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 0 {
+ t.Fatalf("bad: %v", err)
+ }
+
+ // Get should have no visibility
+ out, err := view.Get("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Try to put the same entry via the view
+ if err := view.Put(entry.Logical()); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check it is nested
+ entry, err = barrier.Get("foo/test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if entry == nil {
+ t.Fatalf("missing nested foo/test")
+ }
+
+ // Delete nested
+ if err := view.Delete("test"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check the nested key
+ entry, err = barrier.Get("foo/test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if entry != nil {
+ t.Fatalf("nested foo/test should be gone")
+ }
+
+ // Check the non-nested key
+ entry, err = barrier.Get("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if entry == nil {
+ t.Fatalf("root test missing")
+ }
+}
+
+func TestBarrierView_SubView(t *testing.T) {
+ _, barrier, _ := mockBarrier(t)
+ root := NewBarrierView(barrier, "foo/")
+ view := root.SubView("bar/")
+
+ // List should have no visibility
+ keys, err := view.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 0 {
+ t.Fatalf("bad: %v", err)
+ }
+
+ // Get should have no visibility
+ out, err := view.Get("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Try to put the same entry via the view
+ entry := &logical.StorageEntry{Key: "test", Value: []byte("test")}
+ if err := view.Put(entry); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check it is nested
+ bout, err := barrier.Get("foo/bar/test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if bout == nil {
+ t.Fatalf("missing nested foo/bar/test")
+ }
+
+ // Check for visibility in root
+ out, err = root.Get("bar/test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("missing nested bar/test")
+ }
+
+ // Delete nested
+ if err := view.Delete("test"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check the nested key
+ bout, err = barrier.Get("foo/bar/test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if bout != nil {
+ t.Fatalf("nested foo/bar/test should be gone")
+ }
+}
+
+func TestBarrierView_Scan(t *testing.T) {
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "view/")
+
+ expect := []string{}
+ ent := []*logical.StorageEntry{
+ &logical.StorageEntry{Key: "foo", Value: []byte("test")},
+ &logical.StorageEntry{Key: "zip", Value: []byte("test")},
+ &logical.StorageEntry{Key: "foo/bar", Value: []byte("test")},
+ &logical.StorageEntry{Key: "foo/zap", Value: []byte("test")},
+ &logical.StorageEntry{Key: "foo/bar/baz", Value: []byte("test")},
+ &logical.StorageEntry{Key: "foo/bar/zoo", Value: []byte("test")},
+ }
+
+ for _, e := range ent {
+ expect = append(expect, e.Key)
+ if err := view.Put(e); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+
+ var out []string
+ cb := func(path string) {
+ out = append(out, path)
+ }
+
+ // Collect the keys
+ if err := logical.ScanView(view, cb); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ sort.Strings(out)
+ sort.Strings(expect)
+ if !reflect.DeepEqual(out, expect) {
+ t.Fatalf("out: %v expect: %v", out, expect)
+ }
+}
+
+func TestBarrierView_CollectKeys(t *testing.T) {
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "view/")
+
+ expect := []string{}
+ ent := []*logical.StorageEntry{
+ &logical.StorageEntry{Key: "foo", Value: []byte("test")},
+ &logical.StorageEntry{Key: "zip", Value: []byte("test")},
+ &logical.StorageEntry{Key: "foo/bar", Value: []byte("test")},
+ &logical.StorageEntry{Key: "foo/zap", Value: []byte("test")},
+ &logical.StorageEntry{Key: "foo/bar/baz", Value: []byte("test")},
+ &logical.StorageEntry{Key: "foo/bar/zoo", Value: []byte("test")},
+ }
+
+ for _, e := range ent {
+ expect = append(expect, e.Key)
+ if err := view.Put(e); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+
+ // Collect the keys
+ out, err := logical.CollectKeys(view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ sort.Strings(out)
+ sort.Strings(expect)
+ if !reflect.DeepEqual(out, expect) {
+ t.Fatalf("out: %v expect: %v", out, expect)
+ }
+}
+
+func TestBarrierView_ClearView(t *testing.T) {
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "view/")
+
+ expect := []string{}
+ ent := []*logical.StorageEntry{
+ &logical.StorageEntry{Key: "foo", Value: []byte("test")},
+ &logical.StorageEntry{Key: "zip", Value: []byte("test")},
+ &logical.StorageEntry{Key: "foo/bar", Value: []byte("test")},
+ &logical.StorageEntry{Key: "foo/zap", Value: []byte("test")},
+ &logical.StorageEntry{Key: "foo/bar/baz", Value: []byte("test")},
+ &logical.StorageEntry{Key: "foo/bar/zoo", Value: []byte("test")},
+ }
+
+ for _, e := range ent {
+ expect = append(expect, e.Key)
+ if err := view.Put(e); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+
+ // Clear the keys
+ if err := logical.ClearView(view); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Collect the keys
+ out, err := logical.CollectKeys(view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(out) != 0 {
+ t.Fatalf("have keys: %#v", out)
+ }
+}
+
+func TestBarrierView_Readonly(t *testing.T) {
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "foo/")
+
+ // Add a key before enabling read-only
+ entry := &Entry{Key: "test", Value: []byte("test")}
+ if err := view.Put(entry.Logical()); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Enable read only mode
+ view.readonly = true
+
+ // Put should fail in readonly mode
+ if err := view.Put(entry.Logical()); err != logical.ErrReadOnly {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Delete nested
+ if err := view.Delete("test"); err != logical.ErrReadOnly {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check the non-nested key
+ e, err := view.Get("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if e == nil {
+ t.Fatalf("key test missing")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/capabilities.go b/vendor/github.com/hashicorp/vault/vault/capabilities.go
new file mode 100644
index 0000000..6994e52
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/capabilities.go
@@ -0,0 +1,52 @@
+package vault
+
+import (
+ "sort"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+// Capabilities is used to fetch the capabilities of the given token on the given path
+func (c *Core) Capabilities(token, path string) ([]string, error) {
+ if path == "" {
+ return nil, &logical.StatusBadRequest{Err: "missing path"}
+ }
+
+ if token == "" {
+ return nil, &logical.StatusBadRequest{Err: "missing token"}
+ }
+
+ te, err := c.tokenStore.Lookup(token)
+ if err != nil {
+ return nil, err
+ }
+ if te == nil {
+ return nil, &logical.StatusBadRequest{Err: "invalid token"}
+ }
+
+ if te.Policies == nil {
+ return []string{DenyCapability}, nil
+ }
+
+ var policies []*Policy
+ for _, tePolicy := range te.Policies {
+ policy, err := c.policyStore.GetPolicy(tePolicy)
+ if err != nil {
+ return nil, err
+ }
+ policies = append(policies, policy)
+ }
+
+ if len(policies) == 0 {
+ return []string{DenyCapability}, nil
+ }
+
+ acl, err := NewACL(policies)
+ if err != nil {
+ return nil, err
+ }
+
+ capabilities := acl.Capabilities(path)
+ sort.Strings(capabilities)
+ return capabilities, nil
+}
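+
+// Illustrative sketch of the resulting behavior (mirrored by the tests): a
+// root token yields []string{"root"}; a token whose policy grants create,
+// read and sudo on a path yields []string{"create", "read", "sudo"},
+// sorted; a token with no policies yields []string{DenyCapability}.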
diff --git a/vendor/github.com/hashicorp/vault/vault/capabilities_test.go b/vendor/github.com/hashicorp/vault/vault/capabilities_test.go
new file mode 100644
index 0000000..0db7eac
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/capabilities_test.go
@@ -0,0 +1,45 @@
+package vault
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestCapabilities(t *testing.T) {
+ c, _, token := TestCoreUnsealed(t)
+
+ actual, err := c.Capabilities(token, "path")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ expected := []string{"root"}
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
+ }
+
+ // Create a policy
+ policy, _ := Parse(aclPolicy)
+ err = c.policyStore.SetPolicy(policy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Create a token for the policy
+ ent := &TokenEntry{
+ ID: "capabilitiestoken",
+ Path: "testpath",
+ Policies: []string{"dev"},
+ }
+ if err := c.tokenStore.create(ent); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ actual, err = c.Capabilities("capabilitiestoken", "foo/bar")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ expected = []string{"create", "read", "sudo"}
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/cluster.go b/vendor/github.com/hashicorp/vault/vault/cluster.go
new file mode 100644
index 0000000..dc9fb65
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/cluster.go
@@ -0,0 +1,531 @@
+package vault
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/big"
+ mathrand "math/rand"
+ "net"
+ "net/http"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "golang.org/x/net/http2"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/forwarding"
+ "github.com/hashicorp/vault/helper/jsonutil"
+)
+
+const (
+ // Storage path where the local cluster name and identifier are stored
+ coreLocalClusterInfoPath = "core/cluster/local/info"
+
+ corePrivateKeyTypeP521 = "p521"
+ corePrivateKeyTypeED25519 = "ed25519"
+
+ // Internal so as not to log a trace message
+ IntNoForwardingHeaderName = "X-Vault-Internal-No-Request-Forwarding"
+)
+
+var (
+ ErrCannotForward = errors.New("cannot forward request; no connection or address not known")
+)
+
+// This can be one of a few key types so the different params may or may not be filled
+type clusterKeyParams struct {
+ Type string `json:"type" structs:"type" mapstructure:"type"`
+ X *big.Int `json:"x" structs:"x" mapstructure:"x"`
+ Y *big.Int `json:"y" structs:"y" mapstructure:"y"`
+ D *big.Int `json:"d" structs:"d" mapstructure:"d"`
+}
+
+type activeConnection struct {
+ transport *http2.Transport
+ clusterAddr string
+}
+
+// Structure representing the storage entry that holds cluster information
+type Cluster struct {
+ // Name of the cluster
+ Name string `json:"name" structs:"name" mapstructure:"name"`
+
+ // Identifier of the cluster
+ ID string `json:"id" structs:"id" mapstructure:"id"`
+}
+
+// Cluster fetches the details of the local cluster. This method errors out
+// when Vault is sealed.
+func (c *Core) Cluster() (*Cluster, error) {
+ var cluster Cluster
+
+ // Fetch the storage entry. This call fails when Vault is sealed.
+ entry, err := c.barrier.Get(coreLocalClusterInfoPath)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return &cluster, nil
+ }
+
+ // Decode the cluster information
+ if err = jsonutil.DecodeJSON(entry.Value, &cluster); err != nil {
+ return nil, fmt.Errorf("failed to decode cluster details: %v", err)
+ }
+
+ // Set in config file
+ if c.clusterName != "" {
+ cluster.Name = c.clusterName
+ }
+
+ return &cluster, nil
+}
+
+// This sets our local cluster cert and private key based on the advertisement.
+// It also ensures the cert is in our local cluster cert pool.
+func (c *Core) loadLocalClusterTLS(adv activeAdvertisement) (retErr error) {
+ defer func() {
+ if retErr != nil {
+ c.clusterParamsLock.Lock()
+ c.localClusterCert = nil
+ c.localClusterPrivateKey = nil
+ c.localClusterParsedCert = nil
+ c.clusterParamsLock.Unlock()
+
+ c.requestForwardingConnectionLock.Lock()
+ c.clearForwardingClients()
+ c.requestForwardingConnectionLock.Unlock()
+ }
+ }()
+
+ switch {
+ case adv.ClusterAddr == "":
+ // Clustering disabled on the server, don't try to look for params
+ return nil
+
+ case adv.ClusterKeyParams == nil:
+ c.logger.Error("core: no key params found loading local cluster TLS information")
+ return fmt.Errorf("no local cluster key params found")
+
+ case adv.ClusterKeyParams.X == nil, adv.ClusterKeyParams.Y == nil, adv.ClusterKeyParams.D == nil:
+ c.logger.Error("core: failed to parse local cluster key due to missing params")
+ return fmt.Errorf("failed to parse local cluster key")
+
+ case adv.ClusterKeyParams.Type != corePrivateKeyTypeP521:
+ c.logger.Error("core: unknown local cluster key type", "key_type", adv.ClusterKeyParams.Type)
+ return fmt.Errorf("failed to find valid local cluster key type")
+
+ case adv.ClusterCert == nil || len(adv.ClusterCert) == 0:
+ c.logger.Error("core: no local cluster cert found")
+ return fmt.Errorf("no local cluster cert found")
+
+ }
+
+ // Prevent data races with the TLS parameters
+ c.clusterParamsLock.Lock()
+ defer c.clusterParamsLock.Unlock()
+
+ c.localClusterPrivateKey = &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: elliptic.P521(),
+ X: adv.ClusterKeyParams.X,
+ Y: adv.ClusterKeyParams.Y,
+ },
+ D: adv.ClusterKeyParams.D,
+ }
+
+ c.localClusterCert = adv.ClusterCert
+
+ cert, err := x509.ParseCertificate(c.localClusterCert)
+ if err != nil {
+ c.logger.Error("core: failed parsing local cluster certificate", "error", err)
+ return fmt.Errorf("error parsing local cluster certificate: %v", err)
+ }
+
+ c.localClusterParsedCert = cert
+
+ return nil
+}
+
+// setupCluster creates storage entries for holding Vault cluster information.
+// Entries will be created only if they are not already present. If clusterName
+// is not supplied, this method will auto-generate it.
+func (c *Core) setupCluster() error {
+ // Prevent data races with the TLS parameters
+ c.clusterParamsLock.Lock()
+ defer c.clusterParamsLock.Unlock()
+
+ // Check if storage index is already present or not
+ cluster, err := c.Cluster()
+ if err != nil {
+ c.logger.Error("core: failed to get cluster details", "error", err)
+ return err
+ }
+
+ var modified bool
+
+ if cluster == nil {
+ cluster = &Cluster{}
+ }
+
+ if cluster.Name == "" {
+ // If cluster name is not supplied, generate one
+ if c.clusterName == "" {
+ c.logger.Trace("core: cluster name not found/set, generating new")
+ clusterNameBytes, err := uuid.GenerateRandomBytes(4)
+ if err != nil {
+ c.logger.Error("core: failed to generate cluster name", "error", err)
+ return err
+ }
+
+ c.clusterName = fmt.Sprintf("vault-cluster-%08x", clusterNameBytes)
+ }
+
+ cluster.Name = c.clusterName
+ if c.logger.IsDebug() {
+ c.logger.Debug("core: cluster name set", "name", cluster.Name)
+ }
+ modified = true
+ }
+
+ if cluster.ID == "" {
+ c.logger.Trace("core: cluster ID not found, generating new")
+ // Generate a clusterID
+ cluster.ID, err = uuid.GenerateUUID()
+ if err != nil {
+ c.logger.Error("core: failed to generate cluster identifier", "error", err)
+ return err
+ }
+ if c.logger.IsDebug() {
+ c.logger.Debug("core: cluster ID set", "id", cluster.ID)
+ }
+ modified = true
+ }
+
+ // If we're using HA, generate server-to-server parameters
+ if c.ha != nil {
+ // Create a private key
+ if c.localClusterPrivateKey == nil {
+ c.logger.Trace("core: generating cluster private key")
+ key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+ if err != nil {
+ c.logger.Error("core: failed to generate local cluster key", "error", err)
+ return err
+ }
+
+ c.localClusterPrivateKey = key
+ }
+
+ // Create a certificate
+ if c.localClusterCert == nil {
+ c.logger.Trace("core: generating local cluster certificate")
+
+ host, err := uuid.GenerateUUID()
+ if err != nil {
+ return err
+ }
+ host = fmt.Sprintf("fw-%s", host)
+ template := &x509.Certificate{
+ Subject: pkix.Name{
+ CommonName: host,
+ },
+ DNSNames: []string{host},
+ ExtKeyUsage: []x509.ExtKeyUsage{
+ x509.ExtKeyUsageServerAuth,
+ x509.ExtKeyUsageClientAuth,
+ },
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign,
+ SerialNumber: big.NewInt(mathrand.Int63()),
+ NotBefore: time.Now().Add(-30 * time.Second),
+ // 30 years of single-active uptime ought to be enough for anybody
+ NotAfter: time.Now().Add(262980 * time.Hour),
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+
+ certBytes, err := x509.CreateCertificate(rand.Reader, template, template, c.localClusterPrivateKey.Public(), c.localClusterPrivateKey)
+ if err != nil {
+ c.logger.Error("core: error generating self-signed cert", "error", err)
+ return errwrap.Wrapf("unable to generate local cluster certificate: {{err}}", err)
+ }
+
+ parsedCert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ c.logger.Error("core: error parsing self-signed cert", "error", err)
+ return errwrap.Wrapf("error parsing generated certificate: {{err}}", err)
+ }
+
+ c.localClusterCert = certBytes
+ c.localClusterParsedCert = parsedCert
+ }
+ }
+
+ if modified {
+ // Encode the cluster information as a JSON string
+ rawCluster, err := json.Marshal(cluster)
+ if err != nil {
+ c.logger.Error("core: failed to encode cluster details", "error", err)
+ return err
+ }
+
+ // Store it
+ err = c.barrier.Put(&Entry{
+ Key: coreLocalClusterInfoPath,
+ Value: rawCluster,
+ })
+ if err != nil {
+ c.logger.Error("core: failed to store cluster details", "error", err)
+ return err
+ }
+ }
+
+ return nil
+}
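+
+// In effect, setupCluster leaves the core with a cluster name (generated as
+// "vault-cluster-%08x" from four random bytes when none is configured), a
+// UUID cluster ID, and, in HA mode, a P-521 ECDSA key plus a long-lived
+// self-signed certificate that serves as CA, server and client certificate
+// for intra-cluster TLS (see ClusterTLSConfig).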
+
+// SetClusterSetupFuncs sets the handler setup func
+func (c *Core) SetClusterSetupFuncs(handler func() (http.Handler, http.Handler)) {
+ c.clusterHandlerSetupFunc = handler
+}
+
+// startClusterListener starts cluster request listeners during postunseal. It
+// is assumed that the state lock is held while this is run. Right now this
+// only starts forwarding listeners; it's TBD whether other request types will
+// be built in the same mechanism or started independently.
+func (c *Core) startClusterListener() error {
+ if c.clusterHandlerSetupFunc == nil {
+ c.logger.Error("core: cluster handler setup function has not been set when trying to start listeners")
+ return fmt.Errorf("cluster handler setup function has not been set")
+ }
+
+ if c.clusterAddr == "" {
+ c.logger.Info("core: clustering disabled, not starting listeners")
+ return nil
+ }
+
+ if c.clusterListenerAddrs == nil || len(c.clusterListenerAddrs) == 0 {
+ c.logger.Warn("core: clustering not disabled but no addresses to listen on")
+ return fmt.Errorf("cluster addresses not found")
+ }
+
+ c.logger.Trace("core: starting cluster listeners")
+
+ err := c.startForwarding()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// stopClusterListener stops any existing listeners during preseal. It is
+// assumed that the state lock is held while this is run.
+func (c *Core) stopClusterListener() {
+ if c.clusterAddr == "" {
+ c.logger.Trace("core: clustering disabled, not stopping listeners")
+ return
+ }
+
+ if !c.clusterListenersRunning {
+ c.logger.Info("core: cluster listeners not running")
+ return
+ }
+ c.logger.Info("core: stopping cluster listeners")
+
+ // Tell the goroutine managing the listeners to perform the shutdown
+ // process
+ c.clusterListenerShutdownCh <- struct{}{}
+
+ // The reason for this loop-de-loop is that we may be unsealing again
+ // quickly, and if the listeners are not yet closed, we will get socket
+ // bind errors. This ensures proper ordering.
+ c.logger.Trace("core: waiting for success notification while stopping cluster listeners")
+ <-c.clusterListenerShutdownSuccessCh
+ c.clusterListenersRunning = false
+
+ c.logger.Info("core: cluster listeners successfully shut down")
+}
+
+// ClusterTLSConfig generates a TLS configuration based on the local/replicated
+// cluster key and cert.
+func (c *Core) ClusterTLSConfig() (*tls.Config, error) {
+ // Using lookup functions allows just-in-time lookup of the current state
+ // of clustering as connections come and go
+
+ serverLookup := func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+ switch {
+ default:
+ var localCert bytes.Buffer
+
+ c.clusterParamsLock.RLock()
+ localCert.Write(c.localClusterCert)
+ localSigner := c.localClusterPrivateKey
+ parsedCert := c.localClusterParsedCert
+ c.clusterParamsLock.RUnlock()
+
+ if localCert.Len() == 0 {
+ return nil, fmt.Errorf("got forwarding connection but no local cert")
+ }
+
+ //c.logger.Trace("core: performing cert name lookup", "hello_server_name", clientHello.ServerName, "local_cluster_cert_name", parsedCert.Subject.CommonName)
+
+ return &tls.Certificate{
+ Certificate: [][]byte{localCert.Bytes()},
+ PrivateKey: localSigner,
+ Leaf: parsedCert,
+ }, nil
+
+ }
+
+ return nil, nil
+ }
+
+ clientLookup := func(requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ //c.logger.Trace("core: performing client cert lookup")
+
+ if len(requestInfo.AcceptableCAs) != 1 {
+ return nil, fmt.Errorf("expected only a single acceptable CA")
+ }
+ var localCert bytes.Buffer
+
+ c.clusterParamsLock.RLock()
+ localCert.Write(c.localClusterCert)
+ localSigner := c.localClusterPrivateKey
+ parsedCert := c.localClusterParsedCert
+ c.clusterParamsLock.RUnlock()
+
+ if localCert.Len() == 0 {
+ return nil, fmt.Errorf("forwarding connection client but no local cert")
+ }
+
+ return &tls.Certificate{
+ Certificate: [][]byte{localCert.Bytes()},
+ PrivateKey: localSigner,
+ Leaf: parsedCert,
+ }, nil
+ }
+
+ serverConfigLookup := func(clientHello *tls.ClientHelloInfo) (*tls.Config, error) {
+ //c.logger.Trace("core: performing server config lookup")
+ for _, v := range clientHello.SupportedProtos {
+ switch v {
+ case "h2", "req_fw_sb-act_v1":
+ default:
+ return nil, fmt.Errorf("unknown ALPN proto %s", v)
+ }
+ }
+
+ caPool := x509.NewCertPool()
+
+ ret := &tls.Config{
+ ClientAuth: tls.RequireAndVerifyClientCert,
+ GetCertificate: serverLookup,
+ GetClientCertificate: clientLookup,
+ MinVersion: tls.VersionTLS12,
+ RootCAs: caPool,
+ ClientCAs: caPool,
+ NextProtos: clientHello.SupportedProtos,
+ }
+
+ switch {
+ default:
+ c.clusterParamsLock.RLock()
+ parsedCert := c.localClusterParsedCert
+ c.clusterParamsLock.RUnlock()
+
+ if parsedCert == nil {
+ return nil, fmt.Errorf("forwarding connection client but no local cert")
+ }
+
+ caPool.AddCert(parsedCert)
+ }
+
+ return ret, nil
+ }
+
+ tlsConfig := &tls.Config{
+ ClientAuth: tls.RequireAndVerifyClientCert,
+ GetCertificate: serverLookup,
+ GetClientCertificate: clientLookup,
+ GetConfigForClient: serverConfigLookup,
+ MinVersion: tls.VersionTLS12,
+ }
+
+ var localCert bytes.Buffer
+ c.clusterParamsLock.RLock()
+ localCert.Write(c.localClusterCert)
+ parsedCert := c.localClusterParsedCert
+ c.clusterParamsLock.RUnlock()
+
+ if parsedCert != nil {
+ tlsConfig.ServerName = parsedCert.Subject.CommonName
+
+ pool := x509.NewCertPool()
+ pool.AddCert(parsedCert)
+ tlsConfig.RootCAs = pool
+ tlsConfig.ClientCAs = pool
+ }
+
+ return tlsConfig, nil
+}
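+
+// The resulting setup is symmetric mutual TLS: the single self-signed
+// cluster certificate acts both as the leaf presented by each side and as
+// the only trusted CA, so only nodes holding the replicated cluster key
+// material can connect.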
+
+func (c *Core) SetClusterListenerAddrs(addrs []*net.TCPAddr) {
+ c.clusterListenerAddrs = addrs
+}
+
+// WrapHandlerForClustering takes in Vault's HTTP handler and returns a setup
+// function that returns both the original handler and one wrapped with cluster
+// methods
+func WrapHandlerForClustering(handler http.Handler, logger log.Logger) func() (http.Handler, http.Handler) {
+ return func() (http.Handler, http.Handler) {
+ // This mux handles cluster functions (right now, only forwarded requests)
+ mux := http.NewServeMux()
+ mux.HandleFunc("/cluster/local/forwarded-request", func(w http.ResponseWriter, req *http.Request) {
+ //logger.Trace("forwarding: serving h2 forwarded request")
+ freq, err := forwarding.ParseForwardedHTTPRequest(req)
+ if err != nil {
+ if logger != nil {
+ logger.Error("http/forwarded-request-server: error parsing forwarded request", "error", err)
+ }
+
+ w.Header().Add("Content-Type", "application/json")
+
+ // The response writer here is different from
+ // the one set in Vault's HTTP handler.
+ // Hence, set the Cache-Control explicitly.
+ w.Header().Set("Cache-Control", "no-store")
+
+ w.WriteHeader(http.StatusInternalServerError)
+
+ type errorResponse struct {
+ Errors []string
+ }
+ resp := &errorResponse{
+ Errors: []string{
+ err.Error(),
+ },
+ }
+
+ enc := json.NewEncoder(w)
+ enc.Encode(resp)
+ return
+ }
+
+ // To avoid the risk of a forward loop in some pathological condition,
+ // set the no-forward header
+ freq.Header.Set(IntNoForwardingHeaderName, "true")
+ handler.ServeHTTP(w, freq)
+ })
+
+ return handler, mux
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/cluster_test.go b/vendor/github.com/hashicorp/vault/vault/cluster_test.go
new file mode 100644
index 0000000..d3ee512
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/cluster_test.go
@@ -0,0 +1,388 @@
+package vault
+
+import (
+ "bytes"
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/physical"
+ log "github.com/mgutz/logxi/v1"
+)
+
+var (
+ clusterTestPausePeriod = 2 * time.Second
+)
+
+func TestClusterFetching(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+
+ err := c.setupCluster()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cluster, err := c.Cluster()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Test whether expected values are found
+ if cluster == nil || cluster.Name == "" || cluster.ID == "" {
+ t.Fatalf("cluster information missing: cluster: %#v", cluster)
+ }
+}
+
+func TestClusterHAFetching(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ redirect := "http://127.0.0.1:8200"
+
+ c, err := NewCore(&CoreConfig{
+ Physical: physical.NewInmemHA(logger),
+ HAPhysical: physical.NewInmemHA(logger),
+ RedirectAddr: redirect,
+ DisableMlock: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ keys, _ := TestCoreInit(t, c)
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(c, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Verify unsealed
+ sealed, err := c.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+
+ // Wait for core to become active
+ TestWaitActive(t, c)
+
+ cluster, err := c.Cluster()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Test whether expected values are found
+ if cluster == nil || cluster.Name == "" || cluster.ID == "" {
+ t.Fatalf("cluster information missing: cluster:%#v", cluster)
+ }
+}
+
+func TestCluster_ListenForRequests(t *testing.T) {
+ // Make this nicer for tests
+ manualStepDownSleepPeriod = 5 * time.Second
+
+ cores := TestCluster(t, []http.Handler{nil, nil, nil}, nil, false)
+ for _, core := range cores {
+ defer core.CloseListeners()
+ }
+
+ root := cores[0].Root
+
+ // Wait for core to become active
+ TestWaitActive(t, cores[0].Core)
+
+ // Use this to have a valid config after sealing since ClusterTLSConfig returns nil
+ var lastTLSConfig *tls.Config
+ checkListenersFunc := func(expectFail bool) {
+ tlsConfig, err := cores[0].ClusterTLSConfig()
+ if err != nil {
+ if err.Error() != consts.ErrSealed.Error() {
+ t.Fatal(err)
+ }
+ tlsConfig = lastTLSConfig
+ } else {
+ tlsConfig.NextProtos = []string{"h2"}
+ lastTLSConfig = tlsConfig
+ }
+
+ for _, ln := range cores[0].Listeners {
+ tcpAddr, ok := ln.Addr().(*net.TCPAddr)
+ if !ok {
+ t.Fatalf("%s not a TCP port", tcpAddr.String())
+ }
+
+ conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", tcpAddr.IP.String(), tcpAddr.Port+10), tlsConfig)
+ if err != nil {
+ if expectFail {
+ t.Logf("testing %s:%d unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+10)
+ continue
+ }
+ t.Fatalf("error: %v\nlisteners are\n%#v\n%#v\n", err, cores[0].Listeners[0], cores[0].Listeners[1])
+ }
+ if expectFail {
+ t.Fatalf("testing %s:%d not unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+10)
+ }
+ err = conn.Handshake()
+ if err != nil {
+ t.Fatal(err)
+ }
+ connState := conn.ConnectionState()
+ switch {
+ case connState.Version != tls.VersionTLS12:
+ t.Fatal("version mismatch")
+ case connState.NegotiatedProtocol != "h2" || !connState.NegotiatedProtocolIsMutual:
+ t.Fatal("bad protocol negotiation")
+ }
+ t.Logf("testing %s:%d successful", tcpAddr.IP.String(), tcpAddr.Port+10)
+ }
+ }
+
+ time.Sleep(clusterTestPausePeriod)
+ checkListenersFunc(false)
+
+ err := cores[0].StepDown(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/step-down",
+ ClientToken: root,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // StepDown doesn't wait during actual preSeal so give time for listeners
+ // to close
+ time.Sleep(clusterTestPausePeriod)
+ checkListenersFunc(true)
+
+ // After this period it should be active again
+ time.Sleep(manualStepDownSleepPeriod)
+ checkListenersFunc(false)
+
+ err = cores[0].Seal(root)
+ if err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(clusterTestPausePeriod)
+ // After sealing it should be inactive again
+ checkListenersFunc(true)
+}
+
+func TestCluster_ForwardRequests(t *testing.T) {
+ // Make this nicer for tests
+ manualStepDownSleepPeriod = 5 * time.Second
+
+ testCluster_ForwardRequestsCommon(t, false)
+ testCluster_ForwardRequestsCommon(t, true)
+ os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "")
+}
+
+func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) {
+ if rpc {
+ os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "1")
+ } else {
+ os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "")
+ }
+
+ handler1 := http.NewServeMux()
+ handler1.HandleFunc("/core1", func(w http.ResponseWriter, req *http.Request) {
+ w.Header().Add("Content-Type", "application/json")
+ w.WriteHeader(201)
+ w.Write([]byte("core1"))
+ })
+ handler2 := http.NewServeMux()
+ handler2.HandleFunc("/core2", func(w http.ResponseWriter, req *http.Request) {
+ w.Header().Add("Content-Type", "application/json")
+ w.WriteHeader(202)
+ w.Write([]byte("core2"))
+ })
+ handler3 := http.NewServeMux()
+ handler3.HandleFunc("/core3", func(w http.ResponseWriter, req *http.Request) {
+ w.Header().Add("Content-Type", "application/json")
+ w.WriteHeader(203)
+ w.Write([]byte("core3"))
+ })
+
+ cores := TestCluster(t, []http.Handler{handler1, handler2, handler3}, nil, true)
+ for _, core := range cores {
+ defer core.CloseListeners()
+ }
+
+ root := cores[0].Root
+
+ // Wait for core to become active
+ TestWaitActive(t, cores[0].Core)
+
+ // Test forwarding a request. Since we're going directly from core to core
+ // with no fallback we know that if it worked, request handling is working
+ testCluster_ForwardRequests(t, cores[1], "core1")
+ testCluster_ForwardRequests(t, cores[2], "core1")
+
+ //
+ // Now we do a bunch of round-robining. The point is to make sure that as
+ // nodes come and go, we can always successfully forward to the active
+ // node.
+ //
+
+ // Ensure active core is cores[1] and test
+ err := cores[0].StepDown(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/step-down",
+ ClientToken: root,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(clusterTestPausePeriod)
+ _ = cores[2].StepDown(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/step-down",
+ ClientToken: root,
+ })
+ time.Sleep(clusterTestPausePeriod)
+ TestWaitActive(t, cores[1].Core)
+ testCluster_ForwardRequests(t, cores[0], "core2")
+ testCluster_ForwardRequests(t, cores[2], "core2")
+
+ // Ensure active core is cores[2] and test
+ err = cores[1].StepDown(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/step-down",
+ ClientToken: root,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(clusterTestPausePeriod)
+ _ = cores[0].StepDown(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/step-down",
+ ClientToken: root,
+ })
+ time.Sleep(clusterTestPausePeriod)
+ TestWaitActive(t, cores[2].Core)
+ testCluster_ForwardRequests(t, cores[0], "core3")
+ testCluster_ForwardRequests(t, cores[1], "core3")
+
+ // Ensure active core is cores[0] and test
+ err = cores[2].StepDown(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/step-down",
+ ClientToken: root,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(clusterTestPausePeriod)
+ _ = cores[1].StepDown(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/step-down",
+ ClientToken: root,
+ })
+ time.Sleep(clusterTestPausePeriod)
+ TestWaitActive(t, cores[0].Core)
+ testCluster_ForwardRequests(t, cores[1], "core1")
+ testCluster_ForwardRequests(t, cores[2], "core1")
+
+ // Ensure active core is cores[1] and test
+ err = cores[0].StepDown(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/step-down",
+ ClientToken: root,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(clusterTestPausePeriod)
+ _ = cores[2].StepDown(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/step-down",
+ ClientToken: root,
+ })
+ time.Sleep(clusterTestPausePeriod)
+ TestWaitActive(t, cores[1].Core)
+ testCluster_ForwardRequests(t, cores[0], "core2")
+ testCluster_ForwardRequests(t, cores[2], "core2")
+
+ // Ensure active core is cores[2] and test
+ err = cores[1].StepDown(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/step-down",
+ ClientToken: root,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(clusterTestPausePeriod)
+ _ = cores[0].StepDown(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/step-down",
+ ClientToken: root,
+ })
+ time.Sleep(clusterTestPausePeriod)
+ TestWaitActive(t, cores[2].Core)
+ testCluster_ForwardRequests(t, cores[0], "core3")
+ testCluster_ForwardRequests(t, cores[1], "core3")
+}
+
+func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID string) {
+ standby, err := c.Standby()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !standby {
+ t.Fatal("expected core to be standby")
+ }
+
+ // We need to call Leader as that refreshes the connection info
+ isLeader, _, err := c.Leader()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if isLeader {
+ t.Fatal("core should not be leader")
+ }
+
+ bodBuf := bytes.NewReader([]byte(`{ "foo": "bar", "zip": "zap" }`))
+ req, err := http.NewRequest("PUT", "https://pushit.real.good:9281/"+remoteCoreID, bodBuf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.Header.Add("X-Vault-Token", c.Root)
+
+ statusCode, header, respBytes, err := c.ForwardRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if header == nil {
+ t.Fatal("err: expected at least a content-type header")
+ }
+ if header.Get("Content-Type") != "application/json" {
+ t.Fatalf("bad content-type: %s", header.Get("Content-Type"))
+ }
+
+ body := string(respBytes)
+
+ if body != remoteCoreID {
+ t.Fatalf("expected %s, got %s", remoteCoreID, body)
+ }
+ switch body {
+ case "core1":
+ if statusCode != 201 {
+ t.Fatal("bad response")
+ }
+ case "core2":
+ if statusCode != 202 {
+ t.Fatal("bad response")
+ }
+ case "core3":
+ if statusCode != 203 {
+ t.Fatal("bad response")
+ }
+ }
+}
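+
+// Usage note (not part of the upstream file): these tests can be run in
+// isolation with the standard test runner, e.g.:
+//
+//	go test -run 'TestCluster' github.com/hashicorp/vault/vault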
diff --git a/vendor/github.com/hashicorp/vault/vault/core.go b/vendor/github.com/hashicorp/vault/vault/core.go
new file mode 100644
index 0000000..396a2bc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/core.go
@@ -0,0 +1,1775 @@
+package vault
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/subtle"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/armon/go-metrics"
+ log "github.com/mgutz/logxi/v1"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/helper/mlock"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/shamir"
+)
+
+const (
+ // coreLockPath is the path used to acquire a coordinating lock
+ // for a highly-available deploy.
+ coreLockPath = "core/lock"
+
+ // The poison pill is used as a check during certain scenarios to indicate
+ // to standby nodes that they should seal
+ poisonPillPath = "core/poison-pill"
+
+ // coreLeaderPrefix is the prefix used for the UUID that contains
+ // the currently elected leader.
+ coreLeaderPrefix = "core/leader/"
+
+ // lockRetryInterval is the interval we re-attempt to acquire the
+ // HA lock if an error is encountered
+ lockRetryInterval = 10 * time.Second
+
+ // keyRotateCheckInterval is how often a standby checks for a key
+ // rotation taking place.
+ keyRotateCheckInterval = 30 * time.Second
+
+ // keyRotateGracePeriod is how long we allow an upgrade path
+ // for standby instances before we delete the upgrade keys
+ keyRotateGracePeriod = 2 * time.Minute
+
+ // leaderPrefixCleanDelay is how long to wait between deletions
+ // of orphaned leader keys, to prevent slamming the backend.
+ leaderPrefixCleanDelay = 200 * time.Millisecond
+
+ // coreKeyringCanaryPath is used as a canary to indicate to replicated
+ // clusters that they need to perform a rekey operation synchronously; the
+ // path deliberately isn't keyring-canary so that it is not skipped when
+ // core/keyring is ignored
+ coreKeyringCanaryPath = "core/canary-keyring"
+)
+
+var (
+ // ErrAlreadyInit is returned if the core is already
+ // initialized. This prevents a re-initialization.
+ ErrAlreadyInit = errors.New("Vault is already initialized")
+
+ // ErrNotInit is returned if a non-initialized barrier
+ // is attempted to be unsealed.
+ ErrNotInit = errors.New("Vault is not initialized")
+
+ // ErrInternalError is returned when we don't want to leak
+ // any information about an internal error
+ ErrInternalError = errors.New("internal error")
+
+ // ErrHANotEnabled is returned if the operation only makes sense
+ // in an HA setting
+ ErrHANotEnabled = errors.New("Vault is not configured for highly-available mode")
+
+ // manualStepDownSleepPeriod is how long to sleep after a user-initiated
+ // step down of the active node, to prevent instantly regrabbing the lock.
+ // It's a var rather than a const so that tests can manipulate it.
+ manualStepDownSleepPeriod = 10 * time.Second
+
+ // Functions only in the Enterprise version
+ enterprisePostUnseal = enterprisePostUnsealImpl
+ enterprisePreSeal = enterprisePreSealImpl
+ startReplication = startReplicationImpl
+ stopReplication = stopReplicationImpl
+ LastRemoteWAL = lastRemoteWALImpl
+)
+
+// ReloadFunc are functions that are called when a reload is requested.
+type ReloadFunc func(map[string]string) error
+
+// NonFatalError is an error that can be returned during NewCore that should be
+// displayed but not cause a program exit
+type NonFatalError struct {
+ Err error
+}
+
+func (e *NonFatalError) WrappedErrors() []error {
+ return []error{e.Err}
+}
+
+func (e *NonFatalError) Error() string {
+ return e.Err.Error()
+}
+
+// ErrInvalidKey is returned if there is a user-based error with a provided
+// unseal key. This will be shown to the user, so should not contain
+// information that is sensitive.
+type ErrInvalidKey struct {
+ Reason string
+}
+
+func (e *ErrInvalidKey) Error() string {
+ return fmt.Sprintf("invalid key: %v", e.Reason)
+}
+
+type activeAdvertisement struct {
+ RedirectAddr string `json:"redirect_addr"`
+ ClusterAddr string `json:"cluster_addr,omitempty"`
+ ClusterCert []byte `json:"cluster_cert,omitempty"`
+ ClusterKeyParams *clusterKeyParams `json:"cluster_key_params,omitempty"`
+}
+
+type unlockInformation struct {
+ Parts [][]byte
+ Nonce string
+}
+
+// Core is used as the central manager of Vault activity. It is the primary point of
+// interface for API handlers and is responsible for managing the logical and physical
+// backends, router, security barrier, and audit trails.
+type Core struct {
+ // N.B.: This is used to populate a dev token down to a replication
+ // secondary, as otherwise, after replication is started, a dev would have
+ // to go through the generate-root process simply to talk to the new
+ // follower cluster.
+ devToken string
+
+ // HABackend may be available depending on the physical backend
+ ha physical.HABackend
+
+ // redirectAddr is the address we advertise as leader if held
+ redirectAddr string
+
+ // clusterAddr is the address we use for clustering
+ clusterAddr string
+
+ // physical backend is the un-trusted backend with durable data
+ physical physical.Backend
+
+ // Our Seal, for seal configuration information
+ seal Seal
+
+ // barrier is the security barrier wrapping the physical backend
+ barrier SecurityBarrier
+
+ // router is responsible for managing the mount points for logical backends.
+ router *Router
+
+ // logicalBackends is the mapping of backends to use for this core
+ logicalBackends map[string]logical.Factory
+
+ // credentialBackends is the mapping of backends to use for this core
+ credentialBackends map[string]logical.Factory
+
+ // auditBackends is the mapping of backends to use for this core
+ auditBackends map[string]audit.Factory
+
+ // stateLock protects mutable state
+ stateLock sync.RWMutex
+ sealed bool
+
+ standby bool
+ standbyDoneCh chan struct{}
+ standbyStopCh chan struct{}
+ manualStepDownCh chan struct{}
+
+ // unlockInfo has the keys provided to Unseal until the threshold number of
+ // parts is available, as well as the operation nonce
+ unlockInfo *unlockInformation
+
+ // generateRootProgress holds the shares until we reach enough
+ // to verify the master key
+ generateRootConfig *GenerateRootConfig
+ generateRootProgress [][]byte
+ generateRootLock sync.Mutex
+
+ // These variables hold the config and shares we have until we reach
+ // enough to verify the appropriate master key. Note that the same lock is
+ // used; this isn't time-critical so this shouldn't be a problem.
+ barrierRekeyConfig *SealConfig
+ barrierRekeyProgress [][]byte
+ recoveryRekeyConfig *SealConfig
+ recoveryRekeyProgress [][]byte
+ rekeyLock sync.RWMutex
+
+ // mounts is loaded after unseal since it is a protected
+ // configuration
+ mounts *MountTable
+
+ // mountsLock is used to ensure that the mounts table does not
+ // change underneath a calling function
+ mountsLock sync.RWMutex
+
+ // auth is loaded after unseal since it is a protected
+ // configuration
+ auth *MountTable
+
+ // authLock is used to ensure that the auth table does not
+ // change underneath a calling function
+ authLock sync.RWMutex
+
+ // audit is loaded after unseal since it is a protected
+ // configuration
+ audit *MountTable
+
+ // auditLock is used to ensure that the audit table does not
+ // change underneath a calling function
+ auditLock sync.RWMutex
+
+ // auditBroker is used to ingest the audit events and fan
+ // out into the configured audit backends
+ auditBroker *AuditBroker
+
+ // auditedHeaders is used to configure which http headers
+ // can be output in the audit logs
+ auditedHeaders *AuditedHeadersConfig
+
+ // systemBarrierView is the barrier view for the system backend
+ systemBarrierView *BarrierView
+
+ // expiration manager is used for managing LeaseIDs,
+ // renewal, expiration and revocation
+ expiration *ExpirationManager
+
+ // rollback manager is used to run rollbacks periodically
+ rollback *RollbackManager
+
+ // policy store is used to manage named ACL policies
+ policyStore *PolicyStore
+
+ // token store is used to manage authentication tokens
+ tokenStore *TokenStore
+
+ // metricsCh is used to stop the metrics streaming
+ metricsCh chan struct{}
+
+ // metricsMutex is used to prevent a race condition between
+ // metrics emission and sealing leading to a nil pointer
+ metricsMutex sync.Mutex
+
+ defaultLeaseTTL time.Duration
+ maxLeaseTTL time.Duration
+
+ logger log.Logger
+
+ // cachingDisabled indicates whether caches are disabled
+ cachingDisabled bool
+
+ // reloadFuncs is a map containing reload functions
+ reloadFuncs map[string][]ReloadFunc
+
+ // reloadFuncsLock controls access to the funcs
+ reloadFuncsLock sync.RWMutex
+
+ // wrappingJWTKey is the key used for generating JWTs containing response
+ // wrapping information
+ wrappingJWTKey *ecdsa.PrivateKey
+
+ //
+ // Cluster information
+ //
+ // Name
+ clusterName string
+ // Used to modify cluster parameters
+ clusterParamsLock sync.RWMutex
+ // The private key stored in the barrier used for establishing
+ // mutually-authenticated connections between Vault cluster members
+ localClusterPrivateKey crypto.Signer
+ // The local cluster cert
+ localClusterCert []byte
+ // The parsed form of the local cluster cert
+ localClusterParsedCert *x509.Certificate
+ // The TCP addresses we should use for clustering
+ clusterListenerAddrs []*net.TCPAddr
+ // The setup function that gives us the handler to use
+ clusterHandlerSetupFunc func() (http.Handler, http.Handler)
+ // Tracks whether cluster listeners are running, i.e. whether it's safe to
+ // send a shutdown down the channel
+ clusterListenersRunning bool
+ // Shutdown channel for the cluster listeners
+ clusterListenerShutdownCh chan struct{}
+ // Shutdown success channel. We need this to be done serially to ensure
+ // that binds are removed before they might be reinstated.
+ clusterListenerShutdownSuccessCh chan struct{}
+ // Connection info containing a client and a current active address
+ requestForwardingConnection *activeConnection
+ // Write lock used to ensure that we don't have multiple connections adjust
+ // this value at the same time
+ requestForwardingConnectionLock sync.RWMutex
+ // Most recent leader UUID. Used to avoid repeatedly JSON parsing the same
+ // values.
+ clusterLeaderUUID string
+ // Most recent leader redirect addr
+ clusterLeaderRedirectAddr string
+ // Lock for the cluster leader values
+ clusterLeaderParamsLock sync.RWMutex
+ // The grpc Server that handles server RPC calls
+ rpcServer *grpc.Server
+ // The function for canceling the client connection
+ rpcClientConnCancelFunc context.CancelFunc
+ // The grpc ClientConn for RPC calls
+ rpcClientConn *grpc.ClientConn
+ // The grpc forwarding client
+ rpcForwardingClient RequestForwardingClient
+
+ // replicationState keeps the current replication state cached for quick
+ // lookup
+ replicationState consts.ReplicationState
+
+ // uiEnabled indicates whether Vault Web UI is enabled or not
+ uiEnabled bool
+}
+
+// CoreConfig is used to parameterize a core
+type CoreConfig struct {
+ DevToken string `json:"dev_token" structs:"dev_token" mapstructure:"dev_token"`
+
+ LogicalBackends map[string]logical.Factory `json:"logical_backends" structs:"logical_backends" mapstructure:"logical_backends"`
+
+ CredentialBackends map[string]logical.Factory `json:"credential_backends" structs:"credential_backends" mapstructure:"credential_backends"`
+
+ AuditBackends map[string]audit.Factory `json:"audit_backends" structs:"audit_backends" mapstructure:"audit_backends"`
+
+ Physical physical.Backend `json:"physical" structs:"physical" mapstructure:"physical"`
+
+ // May be nil, which disables HA operations
+ HAPhysical physical.HABackend `json:"ha_physical" structs:"ha_physical" mapstructure:"ha_physical"`
+
+ Seal Seal `json:"seal" structs:"seal" mapstructure:"seal"`
+
+ Logger log.Logger `json:"logger" structs:"logger" mapstructure:"logger"`
+
+ // Disables the LRU cache on the physical backend
+ DisableCache bool `json:"disable_cache" structs:"disable_cache" mapstructure:"disable_cache"`
+
+ // Disables mlock syscall
+ DisableMlock bool `json:"disable_mlock" structs:"disable_mlock" mapstructure:"disable_mlock"`
+
+ // Custom cache size for the LRU cache on the physical backend, or zero for default
+ CacheSize int `json:"cache_size" structs:"cache_size" mapstructure:"cache_size"`
+
+ // Set as the leader address for HA
+ RedirectAddr string `json:"redirect_addr" structs:"redirect_addr" mapstructure:"redirect_addr"`
+
+ // Set as the cluster address for HA
+ ClusterAddr string `json:"cluster_addr" structs:"cluster_addr" mapstructure:"cluster_addr"`
+
+ DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+
+ MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+
+ ClusterName string `json:"cluster_name" structs:"cluster_name" mapstructure:"cluster_name"`
+
+ EnableUI bool `json:"ui" structs:"ui" mapstructure:"ui"`
+
+ ReloadFuncs *map[string][]ReloadFunc
+ ReloadFuncsLock *sync.RWMutex
+}
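+
+// A minimal construction sketch, mirroring the HA test in cluster_test.go
+// (the address and backend choice are illustrative):
+//
+//	logger := logformat.NewVaultLogger(log.LevelTrace)
+//	core, err := NewCore(&CoreConfig{
+//		Physical:     physical.NewInmemHA(logger),
+//		HAPhysical:   physical.NewInmemHA(logger),
+//		RedirectAddr: "http://127.0.0.1:8200",
+//		DisableMlock: true,
+//	})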
+
+// NewCore is used to construct a new core
+func NewCore(conf *CoreConfig) (*Core, error) {
+ if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
+ if conf.RedirectAddr == "" {
+ return nil, fmt.Errorf("missing redirect address")
+ }
+ }
+
+ if conf.DefaultLeaseTTL == 0 {
+ conf.DefaultLeaseTTL = defaultLeaseTTL
+ }
+ if conf.MaxLeaseTTL == 0 {
+ conf.MaxLeaseTTL = maxLeaseTTL
+ }
+ if conf.DefaultLeaseTTL > conf.MaxLeaseTTL {
+ return nil, fmt.Errorf("cannot have DefaultLeaseTTL larger than MaxLeaseTTL")
+ }
+
+ // Validate the advertise addr if it's given to us
+ if conf.RedirectAddr != "" {
+ u, err := url.Parse(conf.RedirectAddr)
+ if err != nil {
+ return nil, fmt.Errorf("redirect address is not valid url: %s", err)
+ }
+
+ if u.Scheme == "" {
+ return nil, fmt.Errorf("redirect address must include scheme (ex. 'http')")
+ }
+ }
+
+ // Make a default logger if not provided
+ if conf.Logger == nil {
+ conf.Logger = logformat.NewVaultLogger(log.LevelTrace)
+ }
+
+ // Setup the core
+ c := &Core{
+ devToken: conf.DevToken,
+ physical: conf.Physical,
+ redirectAddr: conf.RedirectAddr,
+ clusterAddr: conf.ClusterAddr,
+ seal: conf.Seal,
+ router: NewRouter(),
+ sealed: true,
+ standby: true,
+ logger: conf.Logger,
+ defaultLeaseTTL: conf.DefaultLeaseTTL,
+ maxLeaseTTL: conf.MaxLeaseTTL,
+ cachingDisabled: conf.DisableCache,
+ clusterName: conf.ClusterName,
+ clusterListenerShutdownCh: make(chan struct{}),
+ clusterListenerShutdownSuccessCh: make(chan struct{}),
+ }
+
+ // Wrap the physical backend in a cache layer if enabled and not already wrapped
+ if _, isCache := conf.Physical.(*physical.Cache); !conf.DisableCache && !isCache {
+ c.physical = physical.NewCache(conf.Physical, conf.CacheSize, conf.Logger)
+ }
+
+ if !conf.DisableMlock {
+ // Ensure our memory usage is locked into physical RAM
+ if err := mlock.LockMemory(); err != nil {
+ return nil, fmt.Errorf(
+ "Failed to lock memory: %v\n\n"+
+ "This usually means that the mlock syscall is not available.\n"+
+ "Vault uses mlock to prevent memory from being swapped to\n"+
+ "disk. This requires root privileges as well as a machine\n"+
+ "that supports mlock. Please enable mlock on your system or\n"+
+ "disable Vault from using it. To disable Vault from using it,\n"+
+ "set the `disable_mlock` configuration option in your configuration\n"+
+ "file.",
+ err)
+ }
+ }
+
+ // Construct a new AES-GCM barrier
+ var err error
+ c.barrier, err = NewAESGCMBarrier(c.physical)
+ if err != nil {
+ return nil, fmt.Errorf("barrier setup failed: %v", err)
+ }
+
+ if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
+ c.ha = conf.HAPhysical
+ }
+
+ // We create the funcs here, then populate the given config with it so that
+ // the caller can share state
+ conf.ReloadFuncsLock = &c.reloadFuncsLock
+ c.reloadFuncsLock.Lock()
+ c.reloadFuncs = make(map[string][]ReloadFunc)
+ c.reloadFuncsLock.Unlock()
+ conf.ReloadFuncs = &c.reloadFuncs
+
+ // Setup the backends
+ logicalBackends := make(map[string]logical.Factory)
+ for k, f := range conf.LogicalBackends {
+ logicalBackends[k] = f
+ }
+ _, ok := logicalBackends["generic"]
+ if !ok {
+ logicalBackends["generic"] = PassthroughBackendFactory
+ }
+ logicalBackends["cubbyhole"] = CubbyholeBackendFactory
+ logicalBackends["system"] = func(config *logical.BackendConfig) (logical.Backend, error) {
+ return NewSystemBackend(c, config)
+ }
+ c.logicalBackends = logicalBackends
+
+ credentialBackends := make(map[string]logical.Factory)
+ for k, f := range conf.CredentialBackends {
+ credentialBackends[k] = f
+ }
+ credentialBackends["token"] = func(config *logical.BackendConfig) (logical.Backend, error) {
+ return NewTokenStore(c, config)
+ }
+ c.credentialBackends = credentialBackends
+
+ auditBackends := make(map[string]audit.Factory)
+ for k, f := range conf.AuditBackends {
+ auditBackends[k] = f
+ }
+ c.auditBackends = auditBackends
+
+ if c.seal == nil {
+ c.seal = &DefaultSeal{}
+ }
+ c.seal.SetCore(c)
+
+ // Attempt unsealing with stored keys; if there are no stored keys this is
+ // a no-op and returns nil, otherwise it returns nil on success or an error
+ // on failure
+ storedKeyErr := c.UnsealWithStoredKeys()
+
+ return c, storedKeyErr
+}
+
+// Shutdown is invoked when the Vault instance is about to be terminated. It
+// should not be accessible as part of an API call as it will cause an availability
+// problem. It is only used to gracefully quit in the case of HA so that failover
+// happens as quickly as possible.
+func (c *Core) Shutdown() error {
+ c.stateLock.Lock()
+ defer c.stateLock.Unlock()
+ if c.sealed {
+ return nil
+ }
+
+ // Seal the Vault; this causes a leader stepdown
+ return c.sealInternal()
+}
+
+// LookupToken returns the properties of the token from the token store. This
+// is particularly useful to fetch the accessor of the client token and get it
+// populated in the logical request along with the client token. The accessor
+// of the client token can get audit logged.
+func (c *Core) LookupToken(token string) (*TokenEntry, error) {
+ if token == "" {
+ return nil, fmt.Errorf("missing client token")
+ }
+
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return nil, consts.ErrSealed
+ }
+ if c.standby {
+ return nil, consts.ErrStandby
+ }
+
+ // Many tests don't have a token store running
+ if c.tokenStore == nil {
+ return nil, nil
+ }
+
+ return c.tokenStore.Lookup(token)
+}
+
+func (c *Core) fetchACLandTokenEntry(req *logical.Request) (*ACL, *TokenEntry, error) {
+ defer metrics.MeasureSince([]string{"core", "fetch_acl_and_token"}, time.Now())
+
+ // Ensure there is a client token
+ if req.ClientToken == "" {
+ return nil, nil, fmt.Errorf("missing client token")
+ }
+
+ if c.tokenStore == nil {
+ c.logger.Error("core: token store is unavailable")
+ return nil, nil, ErrInternalError
+ }
+
+ // Resolve the token policy
+ te, err := c.tokenStore.Lookup(req.ClientToken)
+ if err != nil {
+ c.logger.Error("core: failed to lookup token", "error", err)
+ return nil, nil, ErrInternalError
+ }
+
+ // Ensure the token is valid
+ if te == nil {
+ return nil, nil, logical.ErrPermissionDenied
+ }
+
+ // Construct the corresponding ACL object
+ acl, err := c.policyStore.ACL(te.Policies...)
+ if err != nil {
+ c.logger.Error("core: failed to construct ACL", "error", err)
+ return nil, nil, ErrInternalError
+ }
+
+ return acl, te, nil
+}
+
+func (c *Core) checkToken(req *logical.Request) (*logical.Auth, *TokenEntry, error) {
+ defer metrics.MeasureSince([]string{"core", "check_token"}, time.Now())
+
+ acl, te, err := c.fetchACLandTokenEntry(req)
+ if err != nil {
+ return nil, te, err
+ }
+
+ // Check if this is a root protected path
+ rootPath := c.router.RootPath(req.Path)
+
+ // When we receive a write of either type, rather than require clients to
+ // PUT/POST and trust the operation, we ask the backend to give us the real
+ // skinny -- if the backend implements an existence check, it can tell us
+ // whether a particular resource exists. Then we can mark it as an update
+ // or creation as appropriate.
+ if req.Operation == logical.CreateOperation || req.Operation == logical.UpdateOperation {
+ checkExists, resourceExists, err := c.router.RouteExistenceCheck(req)
+ switch err {
+ case logical.ErrUnsupportedPath:
+ // fail later via bad path to avoid confusing items in the log
+ checkExists = false
+ case nil:
+ // Continue on
+ default:
+ c.logger.Error("core: failed to run existence check", "error", err)
+ if _, ok := err.(errutil.UserError); ok {
+ return nil, nil, err
+ } else {
+ return nil, nil, ErrInternalError
+ }
+ }
+
+ switch {
+ case !checkExists:
+ // No existence check, so always treat it as an update operation,
+ // matching pre-0.5 behavior
+ req.Operation = logical.UpdateOperation
+ case resourceExists:
+ // It exists, so force an update operation
+ req.Operation = logical.UpdateOperation
+ case !resourceExists:
+ // It doesn't exist, force a create operation
+ req.Operation = logical.CreateOperation
+ default:
+ panic("unreachable code")
+ }
+ }
+
+ // Check the standard non-root ACLs. Return the token entry if it's not
+ // allowed so we can decrement the use count.
+ allowed, rootPrivs := acl.AllowOperation(req)
+ if !allowed {
+ return nil, te, logical.ErrPermissionDenied
+ }
+ if rootPath && !rootPrivs {
+ return nil, te, logical.ErrPermissionDenied
+ }
+
+ // Create the auth response
+ auth := &logical.Auth{
+ ClientToken: req.ClientToken,
+ Policies: te.Policies,
+ Metadata: te.Meta,
+ DisplayName: te.DisplayName,
+ }
+ return auth, te, nil
+}
+
+// Sealed checks if the Vault is currently sealed
+func (c *Core) Sealed() (bool, error) {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ return c.sealed, nil
+}
+
+// Standby checks if the Vault is in standby mode
+func (c *Core) Standby() (bool, error) {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ return c.standby, nil
+}
+
+// Leader is used to get the current active leader
+func (c *Core) Leader() (isLeader bool, leaderAddr string, err error) {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+
+ // Check if sealed
+ if c.sealed {
+ return false, "", consts.ErrSealed
+ }
+
+ // Check if HA enabled
+ if c.ha == nil {
+ return false, "", ErrHANotEnabled
+ }
+
+ // Check if we are the leader
+ if !c.standby {
+ return true, c.redirectAddr, nil
+ }
+
+ // Initialize a lock
+ lock, err := c.ha.LockWith(coreLockPath, "read")
+ if err != nil {
+ return false, "", err
+ }
+
+ // Read the value
+ held, leaderUUID, err := lock.Value()
+ if err != nil {
+ return false, "", err
+ }
+ if !held {
+ return false, "", nil
+ }
+
+ c.clusterLeaderParamsLock.RLock()
+ localLeaderUUID := c.clusterLeaderUUID
+ localRedirAddr := c.clusterLeaderRedirectAddr
+ c.clusterLeaderParamsLock.RUnlock()
+
+ // If the leader hasn't changed, return the cached value; nothing changes
+ // mid-leadership, and the barrier caches anyway
+ if leaderUUID == localLeaderUUID && localRedirAddr != "" {
+ return false, localRedirAddr, nil
+ }
+
+ c.logger.Trace("core: found new active node information, refreshing")
+
+ c.clusterLeaderParamsLock.Lock()
+ defer c.clusterLeaderParamsLock.Unlock()
+
+ // Validate base conditions again
+ if leaderUUID == c.clusterLeaderUUID && c.clusterLeaderRedirectAddr != "" {
+ return false, localRedirAddr, nil
+ }
+
+ key := coreLeaderPrefix + leaderUUID
+ entry, err := c.barrier.Get(key)
+ if err != nil {
+ return false, "", err
+ }
+ if entry == nil {
+ return false, "", nil
+ }
+
+ var oldAdv bool
+
+ var adv activeAdvertisement
+ err = jsonutil.DecodeJSON(entry.Value, &adv)
+ if err != nil {
+ // Fall back to pre-struct handling
+ adv.RedirectAddr = string(entry.Value)
+ c.logger.Trace("core: parsed redirect addr for new active node", "redirect_addr", adv.RedirectAddr)
+ oldAdv = true
+ }
+
+ if !oldAdv {
+ c.logger.Trace("core: parsing information for new active node", "active_cluster_addr", adv.ClusterAddr, "active_redirect_addr", adv.RedirectAddr)
+
+ // Ensure we are using current values
+ err = c.loadLocalClusterTLS(adv)
+ if err != nil {
+ return false, "", err
+ }
+
+ // This will ensure that we both have a connection at the ready and that
+ // the address is the current known value
+ err = c.refreshRequestForwardingConnection(adv.ClusterAddr)
+ if err != nil {
+ return false, "", err
+ }
+ }
+
+ // Don't set these until everything has been parsed successfully or we'll
+ // never try again
+ c.clusterLeaderRedirectAddr = adv.RedirectAddr
+ c.clusterLeaderUUID = leaderUUID
+
+ return false, adv.RedirectAddr, nil
+}
+
+// SecretProgress returns the number of keys provided so far
+func (c *Core) SecretProgress() (int, string) {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ switch c.unlockInfo {
+ case nil:
+ return 0, ""
+ default:
+ return len(c.unlockInfo.Parts), c.unlockInfo.Nonce
+ }
+}
+
+// ResetUnsealProcess removes the current unlock parts from memory, to reset
+// the unsealing process
+func (c *Core) ResetUnsealProcess() {
+ c.stateLock.Lock()
+ defer c.stateLock.Unlock()
+ if !c.sealed {
+ return
+ }
+ c.unlockInfo = nil
+}
+
+// Unseal is used to provide one of the key parts to unseal the Vault.
+//
+// The key given as a parameter will automatically be zeroed after
+// this method is done with it. If you want to keep the key around, a copy
+// should be made.
+func (c *Core) Unseal(key []byte) (bool, error) {
+ defer metrics.MeasureSince([]string{"core", "unseal"}, time.Now())
+
+ // Verify the key length
+ min, max := c.barrier.KeyLength()
+ max += shamir.ShareOverhead
+ if len(key) < min {
+ return false, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
+ }
+ if len(key) > max {
+ return false, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
+ }
+
+ // Get the seal configuration
+ config, err := c.seal.BarrierConfig()
+ if err != nil {
+ return false, err
+ }
+
+ // Ensure the barrier is initialized
+ if config == nil {
+ return false, ErrNotInit
+ }
+
+ c.stateLock.Lock()
+ defer c.stateLock.Unlock()
+
+ // Check if already unsealed
+ if !c.sealed {
+ return true, nil
+ }
+
+ masterKey, err := c.unsealPart(config, key)
+ if err != nil {
+ return false, err
+ }
+ if masterKey != nil {
+ return c.unsealInternal(masterKey)
+ }
+
+ return false, nil
+}
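+
+// Hedged usage sketch: callers feed key shares to Unseal one at a time until
+// it reports true, meaning the threshold was reached:
+//
+//	for _, share := range shares {
+//		unsealed, err := core.Unseal(share)
+//		if err != nil {
+//			return err
+//		}
+//		if unsealed {
+//			break
+//		}
+//	}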
+
+func (c *Core) unsealPart(config *SealConfig, key []byte) ([]byte, error) {
+ // Check if we already have this piece
+ if c.unlockInfo != nil {
+ for _, existing := range c.unlockInfo.Parts {
+ if subtle.ConstantTimeCompare(existing, key) == 1 {
+ return nil, nil
+ }
+ }
+ } else {
+ uuid, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+ c.unlockInfo = &unlockInformation{
+ Nonce: uuid,
+ }
+ }
+
+ // Store this key
+ c.unlockInfo.Parts = append(c.unlockInfo.Parts, key)
+
+ // Check if we don't have enough keys to unlock
+ if len(c.unlockInfo.Parts) < config.SecretThreshold {
+ if c.logger.IsDebug() {
+ c.logger.Debug("core: cannot unseal, not enough keys", "keys", len(c.unlockInfo.Parts), "threshold", config.SecretThreshold, "nonce", c.unlockInfo.Nonce)
+ }
+ return nil, nil
+ }
+
+ // Best-effort memzero of unlock parts once we're done with them
+ defer func() {
+ for i := range c.unlockInfo.Parts {
+ memzero(c.unlockInfo.Parts[i])
+ }
+ c.unlockInfo = nil
+ }()
+
+ // Recover the master key
+ var masterKey []byte
+ var err error
+ if config.SecretThreshold == 1 {
+ masterKey = make([]byte, len(c.unlockInfo.Parts[0]))
+ copy(masterKey, c.unlockInfo.Parts[0])
+ } else {
+ masterKey, err = shamir.Combine(c.unlockInfo.Parts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compute master key: %v", err)
+ }
+ }
+
+ return masterKey, nil
+}
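+
+// For intuition, a sketch of the Shamir round trip behind unsealPart, using
+// the same shamir package (Split is the counterpart of Combine; the share
+// count and threshold here are illustrative):
+//
+//	shares, _ := shamir.Split(masterKey, 5, 3) // 5 shares, threshold of 3
+//	recovered, _ := shamir.Combine(shares[:3]) // any 3 distinct shares suffice
+//	// bytes.Equal(recovered, masterKey) == true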
+
+// This must be called with the state write lock held
+func (c *Core) unsealInternal(masterKey []byte) (bool, error) {
+ defer memzero(masterKey)
+
+ // Attempt to unlock
+ if err := c.barrier.Unseal(masterKey); err != nil {
+ return false, err
+ }
+ if c.logger.IsInfo() {
+ c.logger.Info("core: vault is unsealed")
+ }
+
+ // Do post-unseal setup if HA is not enabled
+ if c.ha == nil {
+ // We still need to set up cluster info even if it's not part of a
+ // cluster right now. This also populates the cached cluster object.
+ if err := c.setupCluster(); err != nil {
+ c.logger.Error("core: cluster setup failed", "error", err)
+ c.barrier.Seal()
+ c.logger.Warn("core: vault is sealed")
+ return false, err
+ }
+
+ if err := c.postUnseal(); err != nil {
+ c.logger.Error("core: post-unseal setup failed", "error", err)
+ c.barrier.Seal()
+ c.logger.Warn("core: vault is sealed")
+ return false, err
+ }
+
+ c.standby = false
+ } else {
+ // Go to standby mode, wait until we are active to unseal
+ c.standbyDoneCh = make(chan struct{})
+ c.standbyStopCh = make(chan struct{})
+ c.manualStepDownCh = make(chan struct{})
+ go c.runStandby(c.standbyDoneCh, c.standbyStopCh, c.manualStepDownCh)
+ }
+
+ // Success!
+ c.sealed = false
+ if c.ha != nil {
+ sd, ok := c.ha.(physical.ServiceDiscovery)
+ if ok {
+ if err := sd.NotifySealedStateChange(); err != nil {
+ if c.logger.IsWarn() {
+ c.logger.Warn("core: failed to notify unsealed status", "error", err)
+ }
+ }
+ }
+ }
+ return true, nil
+}
+
+// SealWithRequest takes in a logical.Request, acquires the lock, and passes
+// through to sealInternal
+func (c *Core) SealWithRequest(req *logical.Request) error {
+ defer metrics.MeasureSince([]string{"core", "seal-with-request"}, time.Now())
+
+ c.stateLock.Lock()
+ defer c.stateLock.Unlock()
+
+ if c.sealed {
+ return nil
+ }
+
+ return c.sealInitCommon(req)
+}
+
+// Seal takes in a token and creates a logical.Request, acquires the lock, and
+// passes through to sealInternal
+func (c *Core) Seal(token string) error {
+ defer metrics.MeasureSince([]string{"core", "seal"}, time.Now())
+
+ c.stateLock.Lock()
+ defer c.stateLock.Unlock()
+
+ if c.sealed {
+ return nil
+ }
+
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/seal",
+ ClientToken: token,
+ }
+
+ return c.sealInitCommon(req)
+}
+
+// sealInitCommon is common logic for Seal and SealWithRequest and is used to
+// re-seal the Vault. This requires the Vault to be unsealed again to perform
+// any further operations.
+func (c *Core) sealInitCommon(req *logical.Request) (retErr error) {
+ defer metrics.MeasureSince([]string{"core", "seal-internal"}, time.Now())
+
+ if req == nil {
+ retErr = multierror.Append(retErr, errors.New("nil request to seal"))
+ return retErr
+ }
+
+ // Validate the token is a root token
+ acl, te, err := c.fetchACLandTokenEntry(req)
+ if err != nil {
+ // Since there is no token store in standby nodes, sealing cannot
+ // be done. Ideally, the request would be forwarded to the leader node
+ // for validation and the operation performed there. But for now, we
+ // just return an error and recommend a vault restart, which
+ // essentially does the same thing.
+ if c.standby {
+ c.logger.Error("core: vault cannot seal when in standby mode; please restart instead")
+ retErr = multierror.Append(retErr, errors.New("vault cannot seal when in standby mode; please restart instead"))
+ return retErr
+ }
+ retErr = multierror.Append(retErr, err)
+ return retErr
+ }
+
+ // Audit-log the request before going any further
+ auth := &logical.Auth{
+ ClientToken: req.ClientToken,
+ Policies: te.Policies,
+ Metadata: te.Meta,
+ DisplayName: te.DisplayName,
+ }
+
+ if err := c.auditBroker.LogRequest(auth, req, c.auditedHeaders, nil); err != nil {
+ c.logger.Error("core: failed to audit request", "request_path", req.Path, "error", err)
+ retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue"))
+ return retErr
+ }
+
+ // Attempt to use the token (decrement num_uses)
+ // On error bail out; if the token has been revoked, bail out too
+ if te != nil {
+ te, err = c.tokenStore.UseToken(te)
+ if err != nil {
+ c.logger.Error("core: failed to use token", "error", err)
+ retErr = multierror.Append(retErr, ErrInternalError)
+ return retErr
+ }
+ if te == nil {
+ // Token is no longer valid
+ retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
+ return retErr
+ }
+ if te.NumUses == -1 {
+ // Token needs to be revoked
+ defer func(id string) {
+ err = c.tokenStore.Revoke(id)
+ if err != nil {
+ c.logger.Error("core: token needed revocation after seal but failed to revoke", "error", err)
+ retErr = multierror.Append(retErr, ErrInternalError)
+ }
+ }(te.ID)
+ }
+ }
+
+ // Verify that this operation is allowed
+ allowed, rootPrivs := acl.AllowOperation(req)
+ if !allowed {
+ retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
+ return retErr
+ }
+
+ // We always require root privileges for this operation
+ if !rootPrivs {
+ retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
+ return retErr
+ }
+
+ // Seal the Vault
+ err = c.sealInternal()
+ if err != nil {
+ retErr = multierror.Append(retErr, err)
+ }
+
+ return retErr
+}
+
+// StepDown is used to step down from leadership
+func (c *Core) StepDown(req *logical.Request) (retErr error) {
+ defer metrics.MeasureSince([]string{"core", "step_down"}, time.Now())
+
+ if req == nil {
+ retErr = multierror.Append(retErr, errors.New("nil request to step-down"))
+ return retErr
+ }
+
+ c.stateLock.Lock()
+ defer c.stateLock.Unlock()
+ if c.sealed {
+ return nil
+ }
+ if c.ha == nil || c.standby {
+ return nil
+ }
+
+ acl, te, err := c.fetchACLandTokenEntry(req)
+ if err != nil {
+ retErr = multierror.Append(retErr, err)
+ return retErr
+ }
+
+ // Audit-log the request before going any further
+ auth := &logical.Auth{
+ ClientToken: req.ClientToken,
+ Policies: te.Policies,
+ Metadata: te.Meta,
+ DisplayName: te.DisplayName,
+ }
+
+ if err := c.auditBroker.LogRequest(auth, req, c.auditedHeaders, nil); err != nil {
+ c.logger.Error("core: failed to audit request", "request_path", req.Path, "error", err)
+ retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue"))
+ return retErr
+ }
+
+ // Attempt to use the token (decrement num_uses)
+ if te != nil {
+ te, err = c.tokenStore.UseToken(te)
+ if err != nil {
+ c.logger.Error("core: failed to use token", "error", err)
+ retErr = multierror.Append(retErr, ErrInternalError)
+ return retErr
+ }
+ if te == nil {
+ // Token has been revoked
+ retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
+ return retErr
+ }
+ if te.NumUses == -1 {
+ // Token needs to be revoked
+ defer func(id string) {
+ err = c.tokenStore.Revoke(id)
+ if err != nil {
+ c.logger.Error("core: token needed revocation after step-down but failed to revoke", "error", err)
+ retErr = multierror.Append(retErr, ErrInternalError)
+ }
+ }(te.ID)
+ }
+ }
+
+ // Verify that this operation is allowed
+ allowed, rootPrivs := acl.AllowOperation(req)
+ if !allowed {
+ retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
+ return retErr
+ }
+
+ // We always require root privileges for this operation
+ if !rootPrivs {
+ retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
+ return retErr
+ }
+
+ select {
+ case c.manualStepDownCh <- struct{}{}:
+ default:
+ c.logger.Warn("core: manual step-down operation already queued")
+ }
+
+ return retErr
+}
+
+// sealInternal is an internal method used to seal the vault. It does not do
+// any authorization checking. The stateLock must be held prior to calling.
+func (c *Core) sealInternal() error {
+ // Mark ourselves as sealed to prevent further transactions
+ c.sealed = true
+
+ c.logger.Debug("core: marked as sealed")
+
+ // Clear forwarding clients
+ c.requestForwardingConnectionLock.Lock()
+ c.clearForwardingClients()
+ c.requestForwardingConnectionLock.Unlock()
+
+ // Do pre-seal teardown if HA is not enabled
+ if c.ha == nil {
+ // Even in a non-HA context we key off of this for some things
+ c.standby = true
+ if err := c.preSeal(); err != nil {
+ c.logger.Error("core: pre-seal teardown failed", "error", err)
+ return fmt.Errorf("internal error")
+ }
+ } else {
+ // Signal the standby goroutine to shutdown, wait for completion
+ close(c.standbyStopCh)
+
+ // Release the lock while we wait to avoid deadlocking
+ c.stateLock.Unlock()
+ <-c.standbyDoneCh
+ c.stateLock.Lock()
+ }
+
+ c.logger.Debug("core: sealing barrier")
+ if err := c.barrier.Seal(); err != nil {
+ c.logger.Error("core: error sealing barrier", "error", err)
+ return err
+ }
+
+ if c.ha != nil {
+ sd, ok := c.ha.(physical.ServiceDiscovery)
+ if ok {
+ if err := sd.NotifySealedStateChange(); err != nil {
+ if c.logger.IsWarn() {
+ c.logger.Warn("core: failed to notify sealed status", "error", err)
+ }
+ }
+ }
+ }
+
+ c.logger.Info("core: vault is sealed")
+
+ return nil
+}
+
+// postUnseal is invoked after the barrier is unsealed, but before
+// allowing any user operations. This allows us to setup any state that
+// requires the Vault to be unsealed such as mount tables, logical backends,
+// credential stores, etc.
+func (c *Core) postUnseal() (retErr error) {
+ defer metrics.MeasureSince([]string{"core", "post_unseal"}, time.Now())
+ defer func() {
+ if retErr != nil {
+ c.preSeal()
+ }
+ }()
+ c.logger.Info("core: post-unseal setup starting")
+
+ // Clear forwarding clients; we're active
+ c.requestForwardingConnectionLock.Lock()
+ c.clearForwardingClients()
+ c.requestForwardingConnectionLock.Unlock()
+
+ // Purge the backend if supported
+ if purgable, ok := c.physical.(physical.Purgable); ok {
+ purgable.Purge()
+ }
+
+ // Purge these for safety in case of a rekey
+ c.seal.SetBarrierConfig(nil)
+ if c.seal.RecoveryKeySupported() {
+ c.seal.SetRecoveryConfig(nil)
+ }
+
+ if err := enterprisePostUnseal(c); err != nil {
+ return err
+ }
+ if err := c.ensureWrappingKey(); err != nil {
+ return err
+ }
+ if err := c.loadMounts(); err != nil {
+ return err
+ }
+ if err := c.setupMounts(); err != nil {
+ return err
+ }
+ if err := c.startRollback(); err != nil {
+ return err
+ }
+ if err := c.setupPolicyStore(); err != nil {
+ return err
+ }
+ if err := c.loadCredentials(); err != nil {
+ return err
+ }
+ if err := c.setupCredentials(); err != nil {
+ return err
+ }
+ if err := c.setupExpiration(); err != nil {
+ return err
+ }
+ if err := c.loadAudits(); err != nil {
+ return err
+ }
+ if err := c.setupAudits(); err != nil {
+ return err
+ }
+ if err := c.setupAuditedHeadersConfig(); err != nil {
+ return err
+ }
+ if c.ha != nil {
+ if err := c.startClusterListener(); err != nil {
+ return err
+ }
+ }
+ c.metricsCh = make(chan struct{})
+ go c.emitMetrics(c.metricsCh)
+ c.logger.Info("core: post-unseal setup complete")
+ return nil
+}
+
+// preSeal is invoked before the barrier is sealed, allowing
+// for any state teardown required.
+func (c *Core) preSeal() error {
+ defer metrics.MeasureSince([]string{"core", "pre_seal"}, time.Now())
+ c.logger.Info("core: pre-seal teardown starting")
+
+ // Clear any rekey progress
+ c.barrierRekeyConfig = nil
+ c.barrierRekeyProgress = nil
+ c.recoveryRekeyConfig = nil
+ c.recoveryRekeyProgress = nil
+
+ if c.metricsCh != nil {
+ close(c.metricsCh)
+ c.metricsCh = nil
+ }
+ var result error
+
+ c.stopClusterListener()
+
+ if err := c.teardownAudits(); err != nil {
+ result = multierror.Append(result, errwrap.Wrapf("error tearing down audits: {{err}}", err))
+ }
+ if err := c.stopExpiration(); err != nil {
+ result = multierror.Append(result, errwrap.Wrapf("error stopping expiration: {{err}}", err))
+ }
+ if err := c.teardownCredentials(); err != nil {
+ result = multierror.Append(result, errwrap.Wrapf("error tearing down credentials: {{err}}", err))
+ }
+ if err := c.teardownPolicyStore(); err != nil {
+ result = multierror.Append(result, errwrap.Wrapf("error tearing down policy store: {{err}}", err))
+ }
+ if err := c.stopRollback(); err != nil {
+ result = multierror.Append(result, errwrap.Wrapf("error stopping rollback: {{err}}", err))
+ }
+ if err := c.unloadMounts(); err != nil {
+ result = multierror.Append(result, errwrap.Wrapf("error unloading mounts: {{err}}", err))
+ }
+ if err := enterprisePreSeal(c); err != nil {
+ result = multierror.Append(result, err)
+ }
+
+ // Purge the backend if supported
+ if purgable, ok := c.physical.(physical.Purgable); ok {
+ purgable.Purge()
+ }
+ c.logger.Info("core: pre-seal teardown complete")
+ return result
+}
+
+func enterprisePostUnsealImpl(c *Core) error {
+ return nil
+}
+
+func enterprisePreSealImpl(c *Core) error {
+ return nil
+}
+
+func startReplicationImpl(c *Core) error {
+ return nil
+}
+
+func stopReplicationImpl(c *Core) error {
+ return nil
+}
+
+// runStandby is a long running routine that is used when an HA backend
+// is enabled. It waits until we are leader and switches this Vault to
+// active.
+func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {
+ defer close(doneCh)
+ defer close(manualStepDownCh)
+ c.logger.Info("core: entering standby mode")
+
+ // Monitor for key rotation
+ keyRotateDone := make(chan struct{})
+ keyRotateStop := make(chan struct{})
+ go c.periodicCheckKeyUpgrade(keyRotateDone, keyRotateStop)
+ defer func() {
+ close(keyRotateStop)
+ <-keyRotateDone
+ }()
+
+ for {
+ // Check for a shutdown
+ select {
+ case <-stopCh:
+ return
+ default:
+ }
+
+ // Clear forwarding clients
+ c.requestForwardingConnectionLock.Lock()
+ c.clearForwardingClients()
+ c.requestForwardingConnectionLock.Unlock()
+
+ // Create a lock
+ uuid, err := uuid.GenerateUUID()
+ if err != nil {
+ c.logger.Error("core: failed to generate uuid", "error", err)
+ return
+ }
+ lock, err := c.ha.LockWith(coreLockPath, uuid)
+ if err != nil {
+ c.logger.Error("core: failed to create lock", "error", err)
+ return
+ }
+
+ // Attempt the acquisition
+ leaderLostCh := c.acquireLock(lock, stopCh)
+
+ // Bail if we are being shutdown
+ if leaderLostCh == nil {
+ return
+ }
+ c.logger.Info("core: acquired lock, enabling active operation")
+
+ // This is used later to log a metrics event; this can be helpful to
+ // detect flapping
+ activeTime := time.Now()
+
+ // Grab the lock as we need it for cluster setup, which needs to happen
+ // before advertising.
+ c.stateLock.Lock()
+
+ // This block is used to wipe barrier/seal state and verify that
+ // everything is sane. If we have no sanity in the barrier, we actually
+ // seal, as there's little we can do.
+ {
+ c.seal.SetBarrierConfig(nil)
+ if c.seal.RecoveryKeySupported() {
+ c.seal.SetRecoveryConfig(nil)
+ }
+
+ if err := c.performKeyUpgrades(); err != nil {
+ // We call this in a goroutine so that we can give up the
+ // statelock and have this shut us down; sealInternal has a
+ // workflow where it watches for the stopCh to close so we want
+ // to return from here
+ go c.Shutdown()
+ c.logger.Error("core: error performing key upgrades", "error", err)
+ c.stateLock.Unlock()
+ lock.Unlock()
+ metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
+ return
+ }
+ }
+
+ // Clear previous local cluster cert info so we generate new. Since the
+ // UUID will have changed, standbys will know to look for new info
+ c.clusterParamsLock.Lock()
+ c.localClusterCert = nil
+ c.localClusterParsedCert = nil
+ c.localClusterPrivateKey = nil
+ c.clusterParamsLock.Unlock()
+
+ if err := c.setupCluster(); err != nil {
+ c.stateLock.Unlock()
+ c.logger.Error("core: cluster setup failed", "error", err)
+ lock.Unlock()
+ metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
+ continue
+ }
+
+ // Advertise as leader
+ if err := c.advertiseLeader(uuid, leaderLostCh); err != nil {
+ c.stateLock.Unlock()
+ c.logger.Error("core: leader advertisement setup failed", "error", err)
+ lock.Unlock()
+ metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
+ continue
+ }
+
+ // Attempt the post-unseal process
+ err = c.postUnseal()
+ if err == nil {
+ c.standby = false
+ }
+ c.stateLock.Unlock()
+
+ // Handle a failure to unseal
+ if err != nil {
+ c.logger.Error("core: post-unseal setup failed", "error", err)
+ lock.Unlock()
+ metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
+ continue
+ }
+
+ // Monitor a loss of leadership
+ var manualStepDown bool
+ select {
+ case <-leaderLostCh:
+ c.logger.Warn("core: leadership lost, stopping active operation")
+ case <-stopCh:
+ c.logger.Warn("core: stopping active operation")
+ case <-manualStepDownCh:
+ c.logger.Warn("core: stepping down from active operation to standby")
+ manualStepDown = true
+ }
+
+ metrics.MeasureSince([]string{"core", "leadership_lost"}, activeTime)
+
+ // Clear ourself as leader
+ if err := c.clearLeader(uuid); err != nil {
+ c.logger.Error("core: clearing leader advertisement failed", "error", err)
+ }
+
+ // Attempt the pre-seal process
+ c.stateLock.Lock()
+ c.standby = true
+ preSealErr := c.preSeal()
+ c.stateLock.Unlock()
+
+ // Give up leadership
+ lock.Unlock()
+
+ // Check for a failure to prepare to seal
+ if preSealErr != nil {
+ c.logger.Error("core: pre-seal teardown failed", "error", err)
+ }
+
+ // If we've merely stepped down, we could instantly grab the lock
+ // again. Give the other nodes a chance.
+ if manualStepDown {
+ time.Sleep(manualStepDownSleepPeriod)
+ }
+ }
+}
+
+// periodicCheckKeyUpgrade is used to watch for key rotation events as a standby
+func (c *Core) periodicCheckKeyUpgrade(doneCh, stopCh chan struct{}) {
+ defer close(doneCh)
+ for {
+ select {
+ case <-time.After(keyRotateCheckInterval):
+ // Only check if we are a standby
+ c.stateLock.RLock()
+ standby := c.standby
+ c.stateLock.RUnlock()
+ if !standby {
+ continue
+ }
+
+ // Check for a poison pill. If we can read it, it means we have stale
+ // keys (e.g. from replication being activated) and we need to seal to
+ // be unsealed again.
+ entry, _ := c.barrier.Get(poisonPillPath)
+ if entry != nil && len(entry.Value) > 0 {
+ c.logger.Warn("core: encryption keys have changed out from underneath us (possibly due to replication enabling), must be unsealed again")
+ go c.Shutdown()
+ continue
+ }
+
+ if err := c.checkKeyUpgrades(); err != nil {
+ c.logger.Error("core: key rotation periodic upgrade check failed", "error", err)
+ }
+ case <-stopCh:
+ return
+ }
+ }
+}
+
+// checkKeyUpgrades is used to check if there have been any key rotations
+// and if there is a chain of upgrades available
+func (c *Core) checkKeyUpgrades() error {
+ for {
+ // Check for an upgrade
+ didUpgrade, newTerm, err := c.barrier.CheckUpgrade()
+ if err != nil {
+ return err
+ }
+
+ // Nothing to do if no upgrade
+ if !didUpgrade {
+ break
+ }
+ if c.logger.IsInfo() {
+ c.logger.Info("core: upgraded to new key term", "term", newTerm)
+ }
+ }
+ return nil
+}
+
+// scheduleUpgradeCleanup is used to ensure that all the upgrade paths
+// are cleaned up in a timely manner if a leader failover takes place
+func (c *Core) scheduleUpgradeCleanup() error {
+ // List the upgrades
+ upgrades, err := c.barrier.List(keyringUpgradePrefix)
+ if err != nil {
+ return fmt.Errorf("failed to list upgrades: %v", err)
+ }
+
+ // Nothing to do if no upgrades
+ if len(upgrades) == 0 {
+ return nil
+ }
+
+ // Schedule cleanup for all of them
+ time.AfterFunc(keyRotateGracePeriod, func() {
+ for _, upgrade := range upgrades {
+ path := fmt.Sprintf("%s%s", keyringUpgradePrefix, upgrade)
+ if err := c.barrier.Delete(path); err != nil {
+ c.logger.Error("core: failed to cleanup upgrade", "path", path, "error", err)
+ }
+ }
+ })
+ return nil
+}
+
+func (c *Core) performKeyUpgrades() error {
+ if err := c.checkKeyUpgrades(); err != nil {
+ return errwrap.Wrapf("error checking for key upgrades: {{err}}", err)
+ }
+
+ if err := c.barrier.ReloadMasterKey(); err != nil {
+ return errwrap.Wrapf("error reloading master key: {{err}}", err)
+ }
+
+ if err := c.barrier.ReloadKeyring(); err != nil {
+ return errwrap.Wrapf("error reloading keyring: {{err}}", err)
+ }
+
+ if err := c.scheduleUpgradeCleanup(); err != nil {
+ return errwrap.Wrapf("error scheduling upgrade cleanup: {{err}}", err)
+ }
+
+ return nil
+}
+
+// acquireLock blocks until the lock is acquired, returning the leaderLostCh
+func (c *Core) acquireLock(lock physical.Lock, stopCh <-chan struct{}) <-chan struct{} {
+ for {
+ // Attempt lock acquisition
+ leaderLostCh, err := lock.Lock(stopCh)
+ if err == nil {
+ return leaderLostCh
+ }
+
+ // Retry the acquisition
+ c.logger.Error("core: failed to acquire lock", "error", err)
+ select {
+ case <-time.After(lockRetryInterval):
+ case <-stopCh:
+ return nil
+ }
+ }
+}
+
+// advertiseLeader is used to advertise the current node as leader
+func (c *Core) advertiseLeader(uuid string, leaderLostCh <-chan struct{}) error {
+ go c.cleanLeaderPrefix(uuid, leaderLostCh)
+
+ key, ok := c.localClusterPrivateKey.(*ecdsa.PrivateKey)
+ if !ok {
+ c.logger.Error("core: unknown cluster private key type", "key_type", fmt.Sprintf("%T", c.localClusterPrivateKey))
+ return fmt.Errorf("unknown cluster private key type %T", c.localClusterPrivateKey)
+ }
+
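+ // Flatten the ECDSA key into its curve parameters for storage in the advertisement entry.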
+ keyParams := &clusterKeyParams{
+ Type: corePrivateKeyTypeP521,
+ X: key.X,
+ Y: key.Y,
+ D: key.D,
+ }
+
+ adv := &activeAdvertisement{
+ RedirectAddr: c.redirectAddr,
+ ClusterAddr: c.clusterAddr,
+ ClusterCert: c.localClusterCert,
+ ClusterKeyParams: keyParams,
+ }
+ val, err := jsonutil.EncodeJSON(adv)
+ if err != nil {
+ return err
+ }
+ ent := &Entry{
+ Key: coreLeaderPrefix + uuid,
+ Value: val,
+ }
+ err = c.barrier.Put(ent)
+ if err != nil {
+ return err
+ }
+
+ sd, ok := c.ha.(physical.ServiceDiscovery)
+ if ok {
+ if err := sd.NotifyActiveStateChange(); err != nil {
+ if c.logger.IsWarn() {
+ c.logger.Warn("core: failed to notify active status", "error", err)
+ }
+ }
+ }
+ return nil
+}
+
+func (c *Core) cleanLeaderPrefix(uuid string, leaderLostCh <-chan struct{}) {
+ keys, err := c.barrier.List(coreLeaderPrefix)
+ if err != nil {
+ c.logger.Error("core: failed to list entries in core/leader", "error", err)
+ return
+ }
+ for len(keys) > 0 {
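+ // Remove at most one stale entry per delay tick to keep the cleanup load on the backend low.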
+ select {
+ case <-time.After(leaderPrefixCleanDelay):
+ if keys[0] != uuid {
+ c.barrier.Delete(coreLeaderPrefix + keys[0])
+ }
+ keys = keys[1:]
+ case <-leaderLostCh:
+ return
+ }
+ }
+}
+
+// clearLeader is used to clear our leadership entry
+func (c *Core) clearLeader(uuid string) error {
+ key := coreLeaderPrefix + uuid
+ err := c.barrier.Delete(key)
+
+ // Advertise ourselves as a standby
+ sd, ok := c.ha.(physical.ServiceDiscovery)
+ if ok {
+ if err := sd.NotifyActiveStateChange(); err != nil {
+ if c.logger.IsWarn() {
+ c.logger.Warn("core: failed to notify standby status", "error", err)
+ }
+ }
+ }
+
+ return err
+}
+
+// emitMetrics is used to periodically expose metrics while running
+func (c *Core) emitMetrics(stopCh chan struct{}) {
+ for {
+ select {
+ case <-time.After(time.Second):
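+ // metricsMutex guards c.expiration, which is swapped out during seal/unseal.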
+ c.metricsMutex.Lock()
+ if c.expiration != nil {
+ c.expiration.emitMetrics()
+ }
+ c.metricsMutex.Unlock()
+ case <-stopCh:
+ return
+ }
+ }
+}
+
+func (c *Core) ReplicationState() consts.ReplicationState {
+ var state consts.ReplicationState
+ c.clusterParamsLock.RLock()
+ state = c.replicationState
+ c.clusterParamsLock.RUnlock()
+ return state
+}
+
+func (c *Core) SealAccess() *SealAccess {
+ sa := &SealAccess{}
+ sa.SetSeal(c.seal)
+ return sa
+}
+
+func (c *Core) Logger() log.Logger {
+ return c.logger
+}
+
+func (c *Core) BarrierKeyLength() (min, max int) {
+ min, max = c.barrier.KeyLength()
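+ // Each unseal key share adds Shamir share overhead on top of the raw barrier key length.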
+ max += shamir.ShareOverhead
+ return
+}
+
+func (c *Core) AuditedHeadersConfig() *AuditedHeadersConfig {
+ return c.auditedHeaders
+}
+
+func lastRemoteWALImpl(c *Core) uint64 {
+ return 0
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/core_test.go b/vendor/github.com/hashicorp/vault/vault/core_test.go
new file mode 100644
index 0000000..ced18cd
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/core_test.go
@@ -0,0 +1,2079 @@
+package vault
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/physical"
+ log "github.com/mgutz/logxi/v1"
+)
+
+var (
+ // invalidKey is used to test Unseal
+ invalidKey = []byte("abcdefghijklmnopqrstuvwxyz")[:17]
+)
+
+func TestNewCore_badRedirectAddr(t *testing.T) {
+ logger = logformat.NewVaultLogger(log.LevelTrace)
+
+ conf := &CoreConfig{
+ RedirectAddr: "127.0.0.1:8200",
+ Physical: physical.NewInmem(logger),
+ DisableMlock: true,
+ }
+ _, err := NewCore(conf)
+ if err == nil {
+ t.Fatal("should error")
+ }
+}
+
+func TestSealConfig_Invalid(t *testing.T) {
+ s := &SealConfig{
+ SecretShares: 2,
+ SecretThreshold: 1,
+ }
+ err := s.Validate()
+ if err == nil {
+ t.Fatalf("expected err")
+ }
+}
+
+func TestCore_Unseal_MultiShare(t *testing.T) {
+ c := TestCore(t)
+
+ _, err := TestCoreUnseal(c, invalidKey)
+ if err != ErrNotInit {
+ t.Fatalf("err: %v", err)
+ }
+
+ sealConf := &SealConfig{
+ SecretShares: 5,
+ SecretThreshold: 3,
+ }
+ res, err := c.Initialize(&InitParams{
+ BarrierConfig: sealConf,
+ RecoveryConfig: nil,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ sealed, err := c.Sealed()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !sealed {
+ t.Fatalf("should be sealed")
+ }
+
+ if prog, _ := c.SecretProgress(); prog != 0 {
+ t.Fatalf("bad progress: %d", prog)
+ }
+
+ for i := 0; i < 5; i++ {
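+ // Feed shares one at a time; with a threshold of 3, the third share (i == 2) completes the unseal.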
+ unseal, err := TestCoreUnseal(c, res.SecretShares[i])
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Ignore redundant
+ _, err = TestCoreUnseal(c, res.SecretShares[i])
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if i >= 2 {
+ if !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ if prog, _ := c.SecretProgress(); prog != 0 {
+ t.Fatalf("bad progress: %d", prog)
+ }
+ } else {
+ if unseal {
+ t.Fatalf("should not be unsealed")
+ }
+ if prog, _ := c.SecretProgress(); prog != i+1 {
+ t.Fatalf("bad progress: %d", prog)
+ }
+ }
+ }
+
+ sealed, err = c.Sealed()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if sealed {
+ t.Fatalf("should not be sealed")
+ }
+
+ err = c.Seal(res.RootToken)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Ignore redundant
+ err = c.Seal(res.RootToken)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ sealed, err = c.Sealed()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !sealed {
+ t.Fatalf("should be sealed")
+ }
+}
+
+func TestCore_Unseal_Single(t *testing.T) {
+ c := TestCore(t)
+
+ _, err := TestCoreUnseal(c, invalidKey)
+ if err != ErrNotInit {
+ t.Fatalf("err: %v", err)
+ }
+
+ sealConf := &SealConfig{
+ SecretShares: 1,
+ SecretThreshold: 1,
+ }
+ res, err := c.Initialize(&InitParams{
+ BarrierConfig: sealConf,
+ RecoveryConfig: nil,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ sealed, err := c.Sealed()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !sealed {
+ t.Fatalf("should be sealed")
+ }
+
+ if prog, _ := c.SecretProgress(); prog != 0 {
+ t.Fatalf("bad progress: %d", prog)
+ }
+
+ unseal, err := TestCoreUnseal(c, res.SecretShares[0])
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ if prog, _ := c.SecretProgress(); prog != 0 {
+ t.Fatalf("bad progress: %d", prog)
+ }
+
+ sealed, err = c.Sealed()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if sealed {
+ t.Fatalf("should not be sealed")
+ }
+}
+
+func TestCore_Route_Sealed(t *testing.T) {
+ c := TestCore(t)
+ sealConf := &SealConfig{
+ SecretShares: 1,
+ SecretThreshold: 1,
+ }
+
+ // Should not route anything
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "sys/mounts",
+ }
+ _, err := c.HandleRequest(req)
+ if err != consts.ErrSealed {
+ t.Fatalf("err: %v", err)
+ }
+
+ res, err := c.Initialize(&InitParams{
+ BarrierConfig: sealConf,
+ RecoveryConfig: nil,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ unseal, err := TestCoreUnseal(c, res.SecretShares[0])
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !unseal {
+ t.Fatalf("should be unsealed")
+ }
+
+ // Should not error after unseal
+ req.ClientToken = res.RootToken
+ _, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+// Attempt to unseal after doing a first seal
+func TestCore_SealUnseal(t *testing.T) {
+ c, keys, root := TestCoreUnsealed(t)
+ if err := c.Seal(root); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for i, key := range keys {
+ unseal, err := TestCoreUnseal(c, key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if i+1 == len(keys) && !unseal {
+ t.Fatalf("err: should be unsealed")
+ }
+ }
+}
+
+// Attempt to shutdown after unseal
+func TestCore_Shutdown(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+ if err := c.Shutdown(); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if sealed, err := c.Sealed(); err != nil || !sealed {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+// Attempt to seal bad token
+func TestCore_Seal_BadToken(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+ if err := c.Seal("foo"); err == nil {
+ t.Fatalf("err: %v", err)
+ }
+ if sealed, err := c.Sealed(); err != nil || sealed {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+// Ensure we get a LeaseID
+func TestCore_HandleRequest_Lease(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "secret/test",
+ Data: map[string]interface{}{
+ "foo": "bar",
+ "lease": "1h",
+ },
+ ClientToken: root,
+ }
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read the key
+ req.Operation = logical.ReadOperation
+ req.Data = nil
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Data == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+ if resp.Secret.TTL != time.Hour {
+ t.Fatalf("bad: %#v", resp.Secret)
+ }
+ if resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp.Secret)
+ }
+ if resp.Data["foo"] != "bar" {
+ t.Fatalf("bad: %#v", resp.Data)
+ }
+}
+
+func TestCore_HandleRequest_Lease_MaxLength(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "secret/test",
+ Data: map[string]interface{}{
+ "foo": "bar",
+ "lease": "1000h",
+ },
+ ClientToken: root,
+ }
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read the key
+ req.Operation = logical.ReadOperation
+ req.Data = nil
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Data == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+ if resp.Secret.TTL != c.maxLeaseTTL {
+ t.Fatalf("bad: %#v", resp.Secret)
+ }
+ if resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp.Secret)
+ }
+ if resp.Data["foo"] != "bar" {
+ t.Fatalf("bad: %#v", resp.Data)
+ }
+}
+
+func TestCore_HandleRequest_Lease_DefaultLength(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "secret/test",
+ Data: map[string]interface{}{
+ "foo": "bar",
+ "lease": "0h",
+ },
+ ClientToken: root,
+ }
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read the key
+ req.Operation = logical.ReadOperation
+ req.Data = nil
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Data == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+ if resp.Secret.TTL != c.defaultLeaseTTL {
+ t.Fatalf("bad: %#v", resp.Secret)
+ }
+ if resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp.Secret)
+ }
+ if resp.Data["foo"] != "bar" {
+ t.Fatalf("bad: %#v", resp.Data)
+ }
+}
+
+func TestCore_HandleRequest_MissingToken(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "secret/test",
+ Data: map[string]interface{}{
+ "foo": "bar",
+ "lease": "1h",
+ },
+ }
+ resp, err := c.HandleRequest(req)
+ if err == nil || !errwrap.Contains(err, logical.ErrInvalidRequest.Error()) {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["error"] != "missing client token" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestCore_HandleRequest_InvalidToken(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "secret/test",
+ Data: map[string]interface{}{
+ "foo": "bar",
+ "lease": "1h",
+ },
+ ClientToken: "foobarbaz",
+ }
+ resp, err := c.HandleRequest(req)
+ if err == nil || !errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["error"] != "permission denied" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+// Check that standard permissions work
+func TestCore_HandleRequest_NoSlash(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+
+ req := &logical.Request{
+ Operation: logical.HelpOperation,
+ Path: "secret",
+ ClientToken: root,
+ }
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+ if _, ok := resp.Data["help"]; !ok {
+ t.Fatalf("resp: %v", resp)
+ }
+}
+
+// Test a root path is denied if non-root
+func TestCore_HandleRequest_RootPath(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+ testCoreMakeToken(t, c, root, "child", "", []string{"test"})
+
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "sys/policy", // root protected!
+ ClientToken: "child",
+ }
+ resp, err := c.HandleRequest(req)
+ if err == nil || !errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+}
+
+// Test a root path is allowed if non-root but with sudo
+func TestCore_HandleRequest_RootPath_WithSudo(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+
+ // Set the 'test' policy object to permit access to sys/policy
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/policy/test", // root protected!
+ Data: map[string]interface{}{
+ "rules": `path "sys/policy" { policy = "sudo" }`,
+ },
+ ClientToken: root,
+ }
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Child token (non-root) but with 'test' policy should have access
+ testCoreMakeToken(t, c, root, "child", "", []string{"test"})
+ req = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "sys/policy", // root protected!
+ ClientToken: "child",
+ }
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+// Check that standard permissions work
+func TestCore_HandleRequest_PermissionDenied(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+ testCoreMakeToken(t, c, root, "child", "", []string{"test"})
+
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "secret/test",
+ Data: map[string]interface{}{
+ "foo": "bar",
+ "lease": "1h",
+ },
+ ClientToken: "child",
+ }
+ resp, err := c.HandleRequest(req)
+ if err == nil || !errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+}
+
+// Check that standard permissions work
+func TestCore_HandleRequest_PermissionAllowed(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+ testCoreMakeToken(t, c, root, "child", "", []string{"test"})
+
+ // Set the 'test' policy object to permit access to secret/
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/policy/test",
+ Data: map[string]interface{}{
+ "rules": `path "secret/*" { policy = "write" }`,
+ },
+ ClientToken: root,
+ }
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Write should work now
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "secret/test",
+ Data: map[string]interface{}{
+ "foo": "bar",
+ "lease": "1h",
+ },
+ ClientToken: "child",
+ }
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestCore_HandleRequest_NoClientToken(t *testing.T) {
+ noop := &NoopBackend{
+ Response: &logical.Response{},
+ }
+ c, _, root := TestCoreUnsealed(t)
+ c.logicalBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return noop, nil
+ }
+
+ // Enable the logical backend
+ req := logical.TestRequest(t, logical.UpdateOperation, "sys/mounts/foo")
+ req.Data["type"] = "noop"
+ req.Data["description"] = "foo"
+ req.ClientToken = root
+ _, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Attempt to request with connection data
+ req = &logical.Request{
+ Path: "foo/login",
+ }
+ req.ClientToken = root
+ if _, err := c.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ ct := noop.Requests[0].ClientToken
+ if ct == "" || ct == root {
+ t.Fatalf("bad: %#v", noop.Requests)
+ }
+}
+
+func TestCore_HandleRequest_ConnOnLogin(t *testing.T) {
+ noop := &NoopBackend{
+ Login: []string{"login"},
+ Response: &logical.Response{},
+ }
+ c, _, root := TestCoreUnsealed(t)
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return noop, nil
+ }
+
+ // Enable the credential backend
+ req := logical.TestRequest(t, logical.UpdateOperation, "sys/auth/foo")
+ req.Data["type"] = "noop"
+ req.ClientToken = root
+ _, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Attempt to request with connection data
+ req = &logical.Request{
+ Path: "auth/foo/login",
+ Connection: &logical.Connection{},
+ }
+ if _, err := c.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if noop.Requests[0].Connection == nil {
+ t.Fatalf("bad: %#v", noop.Requests)
+ }
+}
+
+// Ensure we get a client token
+func TestCore_HandleLogin_Token(t *testing.T) {
+ noop := &NoopBackend{
+ Login: []string{"login"},
+ Response: &logical.Response{
+ Auth: &logical.Auth{
+ Policies: []string{"foo", "bar"},
+ Metadata: map[string]string{
+ "user": "armon",
+ },
+ DisplayName: "armon",
+ },
+ },
+ }
+ c, _, root := TestCoreUnsealed(t)
+ c.credentialBackends["noop"] = func(conf *logical.BackendConfig) (logical.Backend, error) {
+ return noop, nil
+ }
+
+ // Enable the credential backend
+ req := logical.TestRequest(t, logical.UpdateOperation, "sys/auth/foo")
+ req.Data["type"] = "noop"
+ req.ClientToken = root
+ _, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Attempt to login
+ lreq := &logical.Request{
+ Path: "auth/foo/login",
+ }
+ lresp, err := c.HandleRequest(lreq)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Ensure we got a client token back
+ clientToken := lresp.Auth.ClientToken
+ if clientToken == "" {
+ t.Fatalf("bad: %#v", lresp)
+ }
+
+ // Check the policy and metadata
+ te, err := c.tokenStore.Lookup(clientToken)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ expect := &TokenEntry{
+ ID: clientToken,
+ Accessor: te.Accessor,
+ Parent: "",
+ Policies: []string{"bar", "default", "foo"},
+ Path: "auth/foo/login",
+ Meta: map[string]string{
+ "user": "armon",
+ },
+ DisplayName: "foo-armon",
+ TTL: time.Hour * 24,
+ CreationTime: te.CreationTime,
+ }
+
+ if !reflect.DeepEqual(te, expect) {
+ t.Fatalf("Bad: %#v expect: %#v", te, expect)
+ }
+
+ // Check that we have a lease with default duration
+ if lresp.Auth.TTL != noop.System().DefaultLeaseTTL() {
+ t.Fatalf("bad: %#v, defaultLeaseTTL: %#v", lresp.Auth, c.defaultLeaseTTL)
+ }
+}
+
+func TestCore_HandleRequest_AuditTrail(t *testing.T) {
+ // Create a noop audit backend
+ noop := &NoopAudit{}
+ c, _, root := TestCoreUnsealed(t)
+ c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ noop = &NoopAudit{
+ Config: config,
+ }
+ return noop, nil
+ }
+
+ // Enable the audit backend
+ req := logical.TestRequest(t, logical.UpdateOperation, "sys/audit/noop")
+ req.Data["type"] = "noop"
+ req.ClientToken = root
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Make a request
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "secret/test",
+ Data: map[string]interface{}{
+ "foo": "bar",
+ "lease": "1h",
+ },
+ ClientToken: root,
+ }
+ req.ClientToken = root
+ if _, err := c.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check the audit trail on request and response
+ if len(noop.ReqAuth) != 1 {
+ t.Fatalf("bad: %#v", noop)
+ }
+ auth := noop.ReqAuth[0]
+ if auth.ClientToken != root {
+ t.Fatalf("bad client token: %#v", auth)
+ }
+ if len(auth.Policies) != 1 || auth.Policies[0] != "root" {
+ t.Fatalf("bad: %#v", auth)
+ }
+ if len(noop.Req) != 1 || !reflect.DeepEqual(noop.Req[0], req) {
+ t.Fatalf("Bad: %#v", noop.Req[0])
+ }
+
+ if len(noop.RespAuth) != 2 {
+ t.Fatalf("bad: %#v", noop)
+ }
+ if !reflect.DeepEqual(noop.RespAuth[1], auth) {
+ t.Fatalf("bad: %#v", auth)
+ }
+ if len(noop.RespReq) != 2 || !reflect.DeepEqual(noop.RespReq[1], req) {
+ t.Fatalf("Bad: %#v", noop.RespReq[1])
+ }
+ if len(noop.Resp) != 2 || !reflect.DeepEqual(noop.Resp[1], resp) {
+ t.Fatalf("Bad: %#v", noop.Resp[1])
+ }
+}
+
+// Ensure we get a client token
+func TestCore_HandleLogin_AuditTrail(t *testing.T) {
+ // Create a credential backend that always logs in as armon
+ noop := &NoopAudit{}
+ noopBack := &NoopBackend{
+ Login: []string{"login"},
+ Response: &logical.Response{
+ Auth: &logical.Auth{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ },
+ Policies: []string{"foo", "bar"},
+ Metadata: map[string]string{
+ "user": "armon",
+ },
+ },
+ },
+ }
+ c, _, root := TestCoreUnsealed(t)
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return noopBack, nil
+ }
+ c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ noop = &NoopAudit{
+ Config: config,
+ }
+ return noop, nil
+ }
+
+ // Enable the credential backend
+ req := logical.TestRequest(t, logical.UpdateOperation, "sys/auth/foo")
+ req.Data["type"] = "noop"
+ req.ClientToken = root
+ _, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Enable the audit backend
+ req = logical.TestRequest(t, logical.UpdateOperation, "sys/audit/noop")
+ req.Data["type"] = "noop"
+ req.ClientToken = root
+ _, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Attempt to login
+ lreq := &logical.Request{
+ Path: "auth/foo/login",
+ }
+ lresp, err := c.HandleRequest(lreq)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Ensure we got a client token back
+ clientToken := lresp.Auth.ClientToken
+ if clientToken == "" {
+ t.Fatalf("bad: %#v", lresp)
+ }
+
+ // Check the audit trail on request and response
+ if len(noop.ReqAuth) != 1 {
+ t.Fatalf("bad: %#v", noop)
+ }
+ if len(noop.Req) != 1 || !reflect.DeepEqual(noop.Req[0], lreq) {
+ t.Fatalf("Bad: %#v %#v", noop.Req[0], lreq)
+ }
+
+ if len(noop.RespAuth) != 2 {
+ t.Fatalf("bad: %#v", noop)
+ }
+ auth := noop.RespAuth[1]
+ if auth.ClientToken != clientToken {
+ t.Fatalf("bad client token: %#v", auth)
+ }
+ if len(auth.Policies) != 3 || auth.Policies[0] != "bar" || auth.Policies[1] != "default" || auth.Policies[2] != "foo" {
+ t.Fatalf("bad: %#v", auth)
+ }
+ if len(noop.RespReq) != 2 || !reflect.DeepEqual(noop.RespReq[1], lreq) {
+ t.Fatalf("Bad: %#v", noop.RespReq[1])
+ }
+ if len(noop.Resp) != 2 || !reflect.DeepEqual(noop.Resp[1], lresp) {
+ t.Fatalf("Bad: %#v %#v", noop.Resp[1], lresp)
+ }
+}
+
+// Check that we register a lease for new tokens
+func TestCore_HandleRequest_CreateToken_Lease(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+
+ // Create a new credential
+ req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/create")
+ req.ClientToken = root
+ req.Data["policies"] = []string{"foo"}
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Ensure we got a new client token back
+ clientToken := resp.Auth.ClientToken
+ if clientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Check the policy and metadata
+ te, err := c.tokenStore.Lookup(clientToken)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ expect := &TokenEntry{
+ ID: clientToken,
+ Accessor: te.Accessor,
+ Parent: root,
+ Policies: []string{"default", "foo"},
+ Path: "auth/token/create",
+ DisplayName: "token",
+ CreationTime: te.CreationTime,
+ TTL: time.Hour * 24 * 32,
+ }
+ if !reflect.DeepEqual(te, expect) {
+ t.Fatalf("Bad: %#v expect: %#v", te, expect)
+ }
+
+ // Check that we have a lease with default duration
+ if resp.Auth.TTL != c.defaultLeaseTTL {
+ t.Fatalf("bad: %#v", resp.Auth)
+ }
+}
+
+// Check that we handle excluding the default policy
+func TestCore_HandleRequest_CreateToken_NoDefaultPolicy(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+
+ // Create a new credential
+ req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/create")
+ req.ClientToken = root
+ req.Data["policies"] = []string{"foo"}
+ req.Data["no_default_policy"] = true
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Ensure we got a new client token back
+ clientToken := resp.Auth.ClientToken
+ if clientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Check the policy and metadata
+ te, err := c.tokenStore.Lookup(clientToken)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ expect := &TokenEntry{
+ ID: clientToken,
+ Accessor: te.Accessor,
+ Parent: root,
+ Policies: []string{"foo"},
+ Path: "auth/token/create",
+ DisplayName: "token",
+ CreationTime: te.CreationTime,
+ TTL: time.Hour * 24 * 32,
+ }
+ if !reflect.DeepEqual(te, expect) {
+ t.Fatalf("Bad: %#v expect: %#v", te, expect)
+ }
+}
+
+func TestCore_LimitedUseToken(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+
+ // Create a new credential
+ req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/create")
+ req.ClientToken = root
+ req.Data["num_uses"] = "1"
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Put a secret
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "secret/foo",
+ Data: map[string]interface{}{
+ "foo": "bar",
+ },
+ ClientToken: resp.Auth.ClientToken,
+ }
+ _, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Second operation should fail
+ _, err = c.HandleRequest(req)
+ if err == nil || !errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestCore_Standby_Seal(t *testing.T) {
+ // Create the first core and initialize it
+ logger = logformat.NewVaultLogger(log.LevelTrace)
+
+ inm := physical.NewInmem(logger)
+ inmha := physical.NewInmemHA(logger)
+ redirectOriginal := "http://127.0.0.1:8200"
+ core, err := NewCore(&CoreConfig{
+ Physical: inm,
+ HAPhysical: inmha,
+ RedirectAddr: redirectOriginal,
+ DisableMlock: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ keys, root := TestCoreInit(t, core)
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Verify unsealed
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+
+ // Wait for core to become active
+ TestWaitActive(t, core)
+
+ // Check the leader is local
+ isLeader, advertise, err := core.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !isLeader {
+ t.Fatalf("should be leader")
+ }
+ if advertise != redirectOriginal {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
+ }
+
+ // Create the second core and initialize it
+ redirectOriginal2 := "http://127.0.0.1:8500"
+ core2, err := NewCore(&CoreConfig{
+ Physical: inm,
+ HAPhysical: inmha,
+ RedirectAddr: redirectOriginal2,
+ DisableMlock: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Verify unsealed
+ sealed, err = core2.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+
+ // Core2 should be in standby
+ standby, err := core2.Standby()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !standby {
+ t.Fatalf("should be standby")
+ }
+
+ // Check the leader is not local
+ isLeader, advertise, err = core2.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if isLeader {
+ t.Fatalf("should not be leader")
+ }
+ if advertise != redirectOriginal {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
+ }
+
+ // Seal the standby core with the correct token. Shouldn't go down
+ err = core2.Seal(root)
+ if err == nil {
+ t.Fatal("should not be sealed")
+ }
+
+ keyUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Seal the standby core with an invalid token. Shouldn't go down
+ err = core2.Seal(keyUUID)
+ if err == nil {
+ t.Fatal("should not be sealed")
+ }
+}
+
+func TestCore_StepDown(t *testing.T) {
+ // Create the first core and initialize it
+ logger = logformat.NewVaultLogger(log.LevelTrace)
+
+ inm := physical.NewInmem(logger)
+ inmha := physical.NewInmemHA(logger)
+ redirectOriginal := "http://127.0.0.1:8200"
+ core, err := NewCore(&CoreConfig{
+ Physical: inm,
+ HAPhysical: inmha,
+ RedirectAddr: redirectOriginal,
+ DisableMlock: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ keys, root := TestCoreInit(t, core)
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Verify unsealed
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+
+ // Wait for core to become active
+ TestWaitActive(t, core)
+
+ // Check the leader is local
+ isLeader, advertise, err := core.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !isLeader {
+ t.Fatalf("should be leader")
+ }
+ if advertise != redirectOriginal {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
+ }
+
+ // Create the second core and initialize it
+ redirectOriginal2 := "http://127.0.0.1:8500"
+ core2, err := NewCore(&CoreConfig{
+ Physical: inm,
+ HAPhysical: inmha,
+ RedirectAddr: redirectOriginal2,
+ DisableMlock: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Verify unsealed
+ sealed, err = core2.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+
+ // Core2 should be in standby
+ standby, err := core2.Standby()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !standby {
+ t.Fatalf("should be standby")
+ }
+
+ // Check the leader is not local
+ isLeader, advertise, err = core2.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if isLeader {
+ t.Fatalf("should not be leader")
+ }
+ if advertise != redirectOriginal {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
+ }
+
+ req := &logical.Request{
+ ClientToken: root,
+ Path: "sys/step-down",
+ }
+
+ // Create an identifier for the request
+ req.ID, err = uuid.GenerateUUID()
+ if err != nil {
+ t.Fatalf("failed to generate identifier for the request: path: %s err: %v", req.Path, err)
+ }
+
+ // Step down core
+ err = core.StepDown(req)
+ if err != nil {
+ t.Fatal("error stepping down core 1")
+ }
+
+ // Give time to switch leaders
+ time.Sleep(5 * time.Second)
+
+ // Core1 should be in standby
+ standby, err = core.Standby()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !standby {
+ t.Fatalf("should be standby")
+ }
+
+ // Check the leader is core2
+ isLeader, advertise, err = core2.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !isLeader {
+ t.Fatalf("should be leader")
+ }
+ if advertise != redirectOriginal2 {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal2)
+ }
+
+ // Check the leader is not local
+ isLeader, advertise, err = core.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if isLeader {
+ t.Fatalf("should not be leader")
+ }
+ if advertise != redirectOriginal2 {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal2)
+ }
+
+ // Step down core2
+ err = core2.StepDown(req)
+ if err != nil {
+ t.Fatal("error stepping down core 1")
+ }
+
+ // Give time to switch leaders -- core 1 will still be waiting on its
+ // cooling off period so give it a full 10 seconds to recover
+ time.Sleep(10 * time.Second)
+
+ // Core2 should be in standby
+ standby, err = core2.Standby()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !standby {
+ t.Fatalf("should be standby")
+ }
+
+ // Check the leader is core1
+ isLeader, advertise, err = core.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !isLeader {
+ t.Fatalf("should be leader")
+ }
+ if advertise != redirectOriginal {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
+ }
+
+ // Check the leader is not local
+ isLeader, advertise, err = core2.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if isLeader {
+ t.Fatalf("should not be leader")
+ }
+ if advertise != redirectOriginal {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
+ }
+}
+
+func TestCore_CleanLeaderPrefix(t *testing.T) {
+ // Create the first core and initialize it
+ logger = logformat.NewVaultLogger(log.LevelTrace)
+
+ inm := physical.NewInmem(logger)
+ inmha := physical.NewInmemHA(logger)
+ redirectOriginal := "http://127.0.0.1:8200"
+ core, err := NewCore(&CoreConfig{
+ Physical: inm,
+ HAPhysical: inmha,
+ RedirectAddr: redirectOriginal,
+ DisableMlock: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ keys, root := TestCoreInit(t, core)
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Verify unsealed
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+
+ // Wait for core to become active
+ TestWaitActive(t, core)
+
+ // Ensure that the original clean function has stopped running
+ time.Sleep(2 * time.Second)
+
+ // Put several random entries
+ for i := 0; i < 5; i++ {
+ keyUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ valueUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ core.barrier.Put(&Entry{
+ Key: coreLeaderPrefix + keyUUID,
+ Value: []byte(valueUUID),
+ })
+ }
+
+ entries, err := core.barrier.List(coreLeaderPrefix)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(entries) != 6 {
+ t.Fatalf("wrong number of core leader prefix entries, got %d", len(entries))
+ }
+
+ // Check the leader is local
+ isLeader, advertise, err := core.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !isLeader {
+ t.Fatalf("should be leader")
+ }
+ if advertise != redirectOriginal {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
+ }
+
+ // Create a second core, attached to same in-memory store
+ redirectOriginal2 := "http://127.0.0.1:8500"
+ core2, err := NewCore(&CoreConfig{
+ Physical: inm,
+ HAPhysical: inmha,
+ RedirectAddr: redirectOriginal2,
+ DisableMlock: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Verify unsealed
+ sealed, err = core2.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+
+ // Core2 should be in standby
+ standby, err := core2.Standby()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !standby {
+ t.Fatalf("should be standby")
+ }
+
+ // Check the leader is not local
+ isLeader, advertise, err = core2.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if isLeader {
+ t.Fatalf("should not be leader")
+ }
+ if advertise != redirectOriginal {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
+ }
+
+ // Seal the first core, should step down
+ err = core.Seal(root)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Core should be in standby
+ standby, err = core.Standby()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !standby {
+ t.Fatalf("should be standby")
+ }
+
+ // Wait for core2 to become active
+ TestWaitActive(t, core2)
+
+ // Check the leader is local
+ isLeader, advertise, err = core2.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !isLeader {
+ t.Fatalf("should be leader")
+ }
+ if advertise != redirectOriginal2 {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal2)
+ }
+
+ // Give time for the entries to clear out; it is conservative at 1/second
+ time.Sleep(10 * leaderPrefixCleanDelay)
+
+ entries, err = core2.barrier.List(coreLeaderPrefix)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(entries) != 1 {
+ t.Fatalf("wrong number of core leader prefix entries, got %d", len(entries))
+ }
+}
+
+func TestCore_Standby(t *testing.T) {
+ logger = logformat.NewVaultLogger(log.LevelTrace)
+
+ inmha := physical.NewInmemHA(logger)
+ testCore_Standby_Common(t, inmha, inmha)
+}
+
+func TestCore_Standby_SeparateHA(t *testing.T) {
+ logger = logformat.NewVaultLogger(log.LevelTrace)
+
+ testCore_Standby_Common(t, physical.NewInmemHA(logger), physical.NewInmemHA(logger))
+}
+
+func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical.HABackend) {
+ // Create the first core and initialize it
+ redirectOriginal := "http://127.0.0.1:8200"
+ core, err := NewCore(&CoreConfig{
+ Physical: inm,
+ HAPhysical: inmha,
+ RedirectAddr: redirectOriginal,
+ DisableMlock: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ keys, root := TestCoreInit(t, core)
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Verify unsealed
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+
+ // Wait for core to become active
+ TestWaitActive(t, core)
+
+ // Put a secret
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "secret/foo",
+ Data: map[string]interface{}{
+ "foo": "bar",
+ },
+ ClientToken: root,
+ }
+ _, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check the leader is local
+ isLeader, advertise, err := core.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !isLeader {
+ t.Fatalf("should be leader")
+ }
+ if advertise != redirectOriginal {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
+ }
+
+ // Create a second core, attached to same in-memory store
+ redirectOriginal2 := "http://127.0.0.1:8500"
+ core2, err := NewCore(&CoreConfig{
+ Physical: inm,
+ HAPhysical: inmha,
+ RedirectAddr: redirectOriginal2,
+ DisableMlock: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Verify unsealed
+ sealed, err = core2.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+
+ // Core2 should be in standby
+ standby, err := core2.Standby()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !standby {
+ t.Fatalf("should be standby")
+ }
+
+ // Request should fail in standby mode
+ _, err = core2.HandleRequest(req)
+ if err != consts.ErrStandby {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check the leader is not local
+ isLeader, advertise, err = core2.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if isLeader {
+ t.Fatalf("should not be leader")
+ }
+ if advertise != redirectOriginal {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
+ }
+
+ // Seal the first core, should step down
+ err = core.Seal(root)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Core should be in standby
+ standby, err = core.Standby()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !standby {
+ t.Fatalf("should be standby")
+ }
+
+ // Wait for core2 to become active
+ TestWaitActive(t, core2)
+
+ // Read the secret
+ req = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "secret/foo",
+ ClientToken: root,
+ }
+ resp, err := core2.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Verify the response
+ if resp.Data["foo"] != "bar" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Check the leader is local
+ isLeader, advertise, err = core2.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !isLeader {
+ t.Fatalf("should be leader")
+ }
+ if advertise != redirectOriginal2 {
+ t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal2)
+ }
+
+ if inm.(*physical.InmemHABackend) == inmha.(*physical.InmemHABackend) {
+ lockSize := inm.(*physical.InmemHABackend).LockMapSize()
+ if lockSize == 0 {
+ t.Fatalf("locks not used with only one HA backend")
+ }
+ } else {
+ lockSize := inmha.(*physical.InmemHABackend).LockMapSize()
+ if lockSize == 0 {
+ t.Fatalf("locks not used with expected HA backend")
+ }
+
+ lockSize = inm.(*physical.InmemHABackend).LockMapSize()
+ if lockSize != 0 {
+ t.Fatalf("locks used with unexpected HA backend")
+ }
+ }
+}
+
+// Ensure that InternalData is never returned
+func TestCore_HandleRequest_Login_InternalData(t *testing.T) {
+ noop := &NoopBackend{
+ Login: []string{"login"},
+ Response: &logical.Response{
+ Auth: &logical.Auth{
+ Policies: []string{"foo", "bar"},
+ InternalData: map[string]interface{}{
+ "foo": "bar",
+ },
+ },
+ },
+ }
+
+ c, _, root := TestCoreUnsealed(t)
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return noop, nil
+ }
+
+ // Enable the credential backend
+ req := logical.TestRequest(t, logical.UpdateOperation, "sys/auth/foo")
+ req.Data["type"] = "noop"
+ req.ClientToken = root
+ _, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Attempt to login
+ lreq := &logical.Request{
+ Path: "auth/foo/login",
+ }
+ lresp, err := c.HandleRequest(lreq)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Ensure we do not get the internal data
+ if lresp.Auth.InternalData != nil {
+ t.Fatalf("bad: %#v", lresp)
+ }
+}
+
+// Ensure that InternalData is never returned
+func TestCore_HandleRequest_InternalData(t *testing.T) {
+ noop := &NoopBackend{
+ Response: &logical.Response{
+ Secret: &logical.Secret{
+ InternalData: map[string]interface{}{
+ "foo": "bar",
+ },
+ },
+ Data: map[string]interface{}{
+ "foo": "bar",
+ },
+ },
+ }
+
+ c, _, root := TestCoreUnsealed(t)
+ c.logicalBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return noop, nil
+ }
+
+ // Enable the logical backend
+ req := logical.TestRequest(t, logical.UpdateOperation, "sys/mounts/foo")
+ req.Data["type"] = "noop"
+ req.ClientToken = root
+ _, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Attempt to read
+ lreq := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "foo/test",
+ ClientToken: root,
+ }
+ lresp, err := c.HandleRequest(lreq)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Ensure we do not get the internal data
+ if lresp.Secret.InternalData != nil {
+ t.Fatalf("bad: %#v", lresp)
+ }
+}
+
+// Ensure login does not return a secret
+func TestCore_HandleLogin_ReturnSecret(t *testing.T) {
+ // Create a credential backend that always logs in as armon
+ noopBack := &NoopBackend{
+ Login: []string{"login"},
+ Response: &logical.Response{
+ Secret: &logical.Secret{},
+ Auth: &logical.Auth{
+ Policies: []string{"foo", "bar"},
+ },
+ },
+ }
+ c, _, root := TestCoreUnsealed(t)
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return noopBack, nil
+ }
+
+ // Enable the credential backend
+ req := logical.TestRequest(t, logical.UpdateOperation, "sys/auth/foo")
+ req.Data["type"] = "noop"
+ req.ClientToken = root
+ _, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Attempt to login
+ lreq := &logical.Request{
+ Path: "auth/foo/login",
+ }
+ _, err = c.HandleRequest(lreq)
+ if err != ErrInternalError {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+// Renew should return the same lease back
+func TestCore_RenewSameLease(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+
+ // Create a leasable secret
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "secret/test",
+ Data: map[string]interface{}{
+ "foo": "bar",
+ "lease": "1h",
+ },
+ ClientToken: root,
+ }
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read the key
+ req.Operation = logical.ReadOperation
+ req.Data = nil
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp.Secret)
+ }
+ original := resp.Secret.LeaseID
+
+ // Renew the lease
+ req = logical.TestRequest(t, logical.UpdateOperation, "sys/renew/"+resp.Secret.LeaseID)
+ req.ClientToken = root
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Verify the lease did not change
+ if resp.Secret.LeaseID != original {
+ t.Fatalf("lease id changed: %s %s", original, resp.Secret.LeaseID)
+ }
+}
+
+// Renew of a token should not create a new lease
+func TestCore_RenewToken_SingleRegister(t *testing.T) {
+ c, _, root := TestCoreUnsealed(t)
+
+ // Create a new token
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "auth/token/create",
+ Data: map[string]interface{}{
+ "lease": "1h",
+ },
+ ClientToken: root,
+ }
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ newClient := resp.Auth.ClientToken
+
+ // Renew the token
+ req = logical.TestRequest(t, logical.UpdateOperation, "auth/token/renew")
+ req.ClientToken = newClient
+ req.Data = map[string]interface{}{
+ "token": newClient,
+ }
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Revoke using the renew prefix
+ req = logical.TestRequest(t, logical.UpdateOperation, "sys/revoke-prefix/auth/token/renew/")
+ req.ClientToken = root
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Verify our token is still valid (i.e. we were not invalidated by the revoke)
+ req = logical.TestRequest(t, logical.UpdateOperation, "auth/token/lookup")
+ req.Data = map[string]interface{}{
+ "token": newClient,
+ }
+ req.ClientToken = newClient
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Verify the token exists
+ if resp.Data["id"] != newClient {
+ t.Fatalf("bad: %#v", resp.Data)
+ }
+}
+
+// Based on bug GH-203, attempt to disable a credential backend with leased secrets
+func TestCore_EnableDisableCred_WithLease(t *testing.T) {
+ noopBack := &NoopBackend{
+ Login: []string{"login"},
+ Response: &logical.Response{
+ Auth: &logical.Auth{
+ Policies: []string{"root"},
+ },
+ },
+ }
+
+ c, _, root := TestCoreUnsealed(t)
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return noopBack, nil
+ }
+
+ var secretWritingPolicy = `
+name = "admins"
+path "secret/*" {
+ capabilities = ["update", "create", "read"]
+}
+`
+
+ ps := c.policyStore
+ policy, _ := Parse(secretWritingPolicy)
+ if err := ps.SetPolicy(policy); err != nil {
+ t.Fatal(err)
+ }
+
+ // Enable the credential backend
+ req := logical.TestRequest(t, logical.UpdateOperation, "sys/auth/foo")
+ req.Data["type"] = "noop"
+ req.ClientToken = root
+ _, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Attempt to login -- should fail because we don't allow root to be returned
+ lreq := &logical.Request{
+ Path: "auth/foo/login",
+ }
+ lresp, err := c.HandleRequest(lreq)
+ if err == nil || lresp == nil || !lresp.IsError() {
+ t.Fatalf("expected error trying to auth and receive root policy")
+ }
+
+ // Fix and try again
+ noopBack.Response.Auth.Policies = []string{"admins"}
+ lreq = &logical.Request{
+ Path: "auth/foo/login",
+ }
+ lresp, err = c.HandleRequest(lreq)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Create a leasable secret
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "secret/test",
+ Data: map[string]interface{}{
+ "foo": "bar",
+ "lease": "1h",
+ },
+ ClientToken: lresp.Auth.ClientToken,
+ }
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read the key
+ req.Operation = logical.ReadOperation
+ req.Data = nil
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp.Secret)
+ }
+
+ // Renew the lease
+ req = logical.TestRequest(t, logical.UpdateOperation, "sys/renew")
+ req.Data = map[string]interface{}{
+ "lease_id": resp.Secret.LeaseID,
+ }
+ req.ClientToken = lresp.Auth.ClientToken
+ _, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Disable the credential backend
+ req = logical.TestRequest(t, logical.DeleteOperation, "sys/auth/foo")
+ req.ClientToken = root
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %#v", err, resp)
+ }
+}
+
+func TestCore_HandleRequest_MountPointType(t *testing.T) {
+ noop := &NoopBackend{
+ Response: &logical.Response{},
+ }
+ c, _, root := TestCoreUnsealed(t)
+ c.logicalBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return noop, nil
+ }
+
+ // Enable the logical backend
+ req := logical.TestRequest(t, logical.UpdateOperation, "sys/mounts/foo")
+ req.Data["type"] = "noop"
+ req.Data["description"] = "foo"
+ req.ClientToken = root
+ _, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Attempt to request
+ req = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "foo/test",
+ Connection: &logical.Connection{},
+ }
+ req.ClientToken = root
+ if _, err := c.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Verify Path, MountPoint, and MountType
+ if noop.Requests[0].Path != "test" {
+ t.Fatalf("bad: %#v", noop.Requests)
+ }
+ if noop.Requests[0].MountPoint != "foo/" {
+ t.Fatalf("bad: %#v", noop.Requests)
+ }
+ if noop.Requests[0].MountType != "noop" {
+ t.Fatalf("bad: %#v", noop.Requests)
+ }
+}
+
+func TestCore_Standby_Rotate(t *testing.T) {
+ // Create the first core and initialize it
+ logger = logformat.NewVaultLogger(log.LevelTrace)
+
+ inm := physical.NewInmem(logger)
+ inmha := physical.NewInmemHA(logger)
+ redirectOriginal := "http://127.0.0.1:8200"
+ core, err := NewCore(&CoreConfig{
+ Physical: inm,
+ HAPhysical: inmha,
+ RedirectAddr: redirectOriginal,
+ DisableMlock: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ keys, root := TestCoreInit(t, core)
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Wait for core to become active
+ TestWaitActive(t, core)
+
+ // Create a second core, attached to same in-memory store
+ redirectOriginal2 := "http://127.0.0.1:8500"
+ core2, err := NewCore(&CoreConfig{
+ Physical: inm,
+ HAPhysical: inmha,
+ RedirectAddr: redirectOriginal2,
+ DisableMlock: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Rotate the encryption key
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "sys/rotate",
+ ClientToken: root,
+ }
+ _, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Seal the first core, should step down
+ err = core.Seal(root)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Wait for core2 to become active
+ TestWaitActive(t, core2)
+
+ // Read the key status
+ req = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "sys/key-status",
+ ClientToken: root,
+ }
+ resp, err := core2.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Verify the response
+ if resp.Data["term"] != 2 {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go b/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go
new file mode 100644
index 0000000..30b6a76
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go
@@ -0,0 +1,89 @@
+package vault
+
+import (
+ "time"
+
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/logical"
+)
+
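+// dynamicSystemView exposes per-mount system view information (TTLs, taint and
+// caching status, replication state) backed by the core and a specific mount entry.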
+type dynamicSystemView struct {
+ core *Core
+ mountEntry *MountEntry
+}
+
+func (d dynamicSystemView) DefaultLeaseTTL() time.Duration {
+ def, _ := d.fetchTTLs()
+ return def
+}
+
+func (d dynamicSystemView) MaxLeaseTTL() time.Duration {
+ _, max := d.fetchTTLs()
+ return max
+}
+
+func (d dynamicSystemView) SudoPrivilege(path string, token string) bool {
+ // Resolve the token policy
+ te, err := d.core.tokenStore.Lookup(token)
+ if err != nil {
+ d.core.logger.Error("core: failed to lookup token", "error", err)
+ return false
+ }
+
+ // Ensure the token is valid
+ if te == nil {
+ d.core.logger.Error("entry not found for given token")
+ return false
+ }
+
+ // Construct the corresponding ACL object
+ acl, err := d.core.policyStore.ACL(te.Policies...)
+ if err != nil {
+ d.core.logger.Error("failed to retrieve ACL for token's policies", "token_policies", te.Policies, "error", err)
+ return false
+ }
+
+ // The operation type isn't important here as this is run from a path the
+ // user has already been given access to; we only care about whether they
+ // have sudo
+ req := new(logical.Request)
+ req.Operation = logical.ReadOperation
+ req.Path = path
+ _, rootPrivs := acl.AllowOperation(req)
+ return rootPrivs
+}
+
+// fetchTTLs returns the default and max TTLs corresponding to a particular
+// mount point, or the system defaults
+func (d dynamicSystemView) fetchTTLs() (def, max time.Duration) {
+ def = d.core.defaultLeaseTTL
+ max = d.core.maxLeaseTTL
+
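+ // A non-zero mount-specific TTL overrides the system-wide value.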
+ if d.mountEntry.Config.DefaultLeaseTTL != 0 {
+ def = d.mountEntry.Config.DefaultLeaseTTL
+ }
+ if d.mountEntry.Config.MaxLeaseTTL != 0 {
+ max = d.mountEntry.Config.MaxLeaseTTL
+ }
+
+ return
+}
+
+// Tainted indicates that the mount is in the process of being removed
+func (d dynamicSystemView) Tainted() bool {
+ return d.mountEntry.Tainted
+}
+
+// CachingDisabled indicates whether caching is disabled for this mount
+func (d dynamicSystemView) CachingDisabled() bool {
+ return d.core.cachingDisabled || (d.mountEntry != nil && d.mountEntry.Config.ForceNoCache)
+}
+
+// ReplicationState returns the replication state of this Vault instance.
+func (d dynamicSystemView) ReplicationState() consts.ReplicationState {
+ var state consts.ReplicationState
+ d.core.clusterParamsLock.RLock()
+ state = d.core.replicationState
+ d.core.clusterParamsLock.RUnlock()
+ return state
+}
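+
+// The methods above are what a mounted backend sees when it queries its
+// logical.SystemView. A minimal, illustrative sketch of consuming it (the
+// sysView variable and the helper are hypothetical, not upstream API):
+//
+// func leaseFor(sysView logical.SystemView, requested time.Duration) time.Duration {
+// ttl := sysView.DefaultLeaseTTL()
+// if requested > 0 && requested < sysView.MaxLeaseTTL() {
+// ttl = requested
+// }
+// return ttl
+// }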
diff --git a/vendor/github.com/hashicorp/vault/vault/expiration.go b/vendor/github.com/hashicorp/vault/vault/expiration.go
new file mode 100644
index 0000000..f0f885e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/expiration.go
@@ -0,0 +1,878 @@
+package vault
+
+import (
+ "encoding/json"
+ "fmt"
+ "path"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/armon/go-metrics"
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+const (
+ // expirationSubPath is the sub-path used for the expiration manager
+ // view. This is nested under the system view.
+ expirationSubPath = "expire/"
+
+ // leaseViewPrefix is the prefix used for the ID based lookup of leases.
+ leaseViewPrefix = "id/"
+
+ // tokenViewPrefix is the prefix used for the token based lookup of leases.
+ tokenViewPrefix = "token/"
+
+ // maxRevokeAttempts limits how many revoke attempts are made
+ maxRevokeAttempts = 6
+
+ // revokeRetryBase is a baseline retry time
+ revokeRetryBase = 10 * time.Second
+
+ // minRevokeDelay is used to prevent an instant revoke on restore
+ minRevokeDelay = 5 * time.Second
+
+ // maxLeaseTTL is the default maximum lease duration
+ maxLeaseTTL = 32 * 24 * time.Hour
+
+ // defaultLeaseTTL is the default lease duration used when no lease is specified
+ defaultLeaseTTL = maxLeaseTTL
+)
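+
+// revokeRetrySchedule is an illustrative helper, not used by the manager
+// itself: it enumerates the delays expireID sleeps between failed revoke
+// attempts given the constants above, i.e. 10s, 20s, 40s, 80s, 160s, 320s.
+func revokeRetrySchedule() []time.Duration {
+ schedule := make([]time.Duration, 0, maxRevokeAttempts)
+ for attempt := uint(0); attempt < maxRevokeAttempts; attempt++ {
+ // Same exponential backoff expression as expireID below
+ schedule = append(schedule, (1<<attempt)*revokeRetryBase)
+ }
+ return schedule
+}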
+
+// ExpirationManager is used by the Core to manage leases. Secrets
+// can provide a lease, meaning that they can be renewed or revoked.
+// If a secret is not renewed in a timely manner, it may expire, and
+// the ExpirationManager will handle the automatic revocation.
+type ExpirationManager struct {
+ router *Router
+ idView *BarrierView
+ tokenView *BarrierView
+ tokenStore *TokenStore
+ logger log.Logger
+
+ pending map[string]*time.Timer
+ pendingLock sync.Mutex
+}
+
+// NewExpirationManager creates a new ExpirationManager that is backed
+// using a given view, and uses the provided router for revocation.
+func NewExpirationManager(router *Router, view *BarrierView, ts *TokenStore, logger log.Logger) *ExpirationManager {
+ if logger == nil {
+ logger = log.New("expiration_manager")
+ }
+ exp := &ExpirationManager{
+ router: router,
+ idView: view.SubView(leaseViewPrefix),
+ tokenView: view.SubView(tokenViewPrefix),
+ tokenStore: ts,
+ logger: logger,
+ pending: make(map[string]*time.Timer),
+ }
+ return exp
+}
+
+// setupExpiration is invoked after we've loaded the mount table to
+// initialize the expiration manager
+func (c *Core) setupExpiration() error {
+ c.metricsMutex.Lock()
+ defer c.metricsMutex.Unlock()
+ // Create a sub-view
+ view := c.systemBarrierView.SubView(expirationSubPath)
+
+ // Create the manager
+ mgr := NewExpirationManager(c.router, view, c.tokenStore, c.logger)
+ c.expiration = mgr
+
+ // Link the token store to this
+ c.tokenStore.SetExpirationManager(mgr)
+
+ // Restore the existing state
+ c.logger.Info("expiration: restoring leases")
+ if err := c.expiration.Restore(); err != nil {
+ return fmt.Errorf("expiration state restore failed: %v", err)
+ }
+ return nil
+}
+
+// stopExpiration is used to stop the expiration manager before
+// sealing the Vault.
+func (c *Core) stopExpiration() error {
+ if c.expiration != nil {
+ if err := c.expiration.Stop(); err != nil {
+ return err
+ }
+ c.metricsMutex.Lock()
+ defer c.metricsMutex.Unlock()
+ c.expiration = nil
+ }
+ return nil
+}
+
+// Restore is used to recover the lease states after starting the Vault.
+func (m *ExpirationManager) Restore() error {
+ m.pendingLock.Lock()
+ defer m.pendingLock.Unlock()
+
+ // Accumulate existing leases
+ m.logger.Debug("expiration: collecting leases")
+ existing, err := logical.CollectKeys(m.idView)
+ if err != nil {
+ return fmt.Errorf("failed to scan for leases: %v", err)
+ }
+ m.logger.Debug("expiration: leases collected", "num_existing", len(existing))
+
+ // Make the channels used for the worker pool
+ broker := make(chan string)
+ quit := make(chan bool)
+ // Buffer these channels to prevent deadlocks
+ errs := make(chan error, len(existing))
+ result := make(chan *leaseEntry, len(existing))
+
+ // Use a wait group
+ wg := &sync.WaitGroup{}
+
+ // Create ExpirationRestoreWorkerCount workers to distribute work to
+ for i := 0; i < consts.ExpirationRestoreWorkerCount; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ for {
+ select {
+ case leaseID, ok := <-broker:
+ // broker has been closed, we are done
+ if !ok {
+ return
+ }
+
+ le, err := m.loadEntry(leaseID)
+ if err != nil {
+ errs <- err
+ continue
+ }
+
+ // Write results out to the result channel
+ result <- le
+
+ // quit early
+ case <-quit:
+ return
+ }
+ }
+ }()
+ }
+
+ // Distribute the collected keys to the workers in a goroutine
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i, leaseID := range existing {
+ if i%500 == 0 {
+ m.logger.Trace("expiration: leases loading", "progress", i)
+ }
+
+ select {
+ case <-quit:
+ return
+
+ default:
+ broker <- leaseID
+ }
+ }
+
+ // Close the broker, causing worker routines to exit
+ close(broker)
+ }()
+
+ // Restore each key by pulling from the result chan
+ for i := 0; i < len(existing); i++ {
+ select {
+ case err := <-errs:
+ // Close the quit channel to stop all goroutines
+ close(quit)
+
+ return err
+
+ case le := <-result:
+
+ // If there is no entry, nothing to restore
+ if le == nil {
+ continue
+ }
+
+ // If there is no expiry time, don't do anything
+ if le.ExpireTime.IsZero() {
+ continue
+ }
+
+ // Determine the remaining time to expiration
+ expires := le.ExpireTime.Sub(time.Now())
+ if expires <= 0 {
+ expires = minRevokeDelay
+ }
+
+ // Setup revocation timer
+ m.pending[le.LeaseID] = time.AfterFunc(expires, func() {
+ m.expireID(le.LeaseID)
+ })
+ }
+ }
+
+ // Wait for all goroutines to finish
+ wg.Wait()
+
+ if len(m.pending) > 0 {
+ if m.logger.IsInfo() {
+ m.logger.Info("expire: leases restored", "restored_lease_count", len(m.pending))
+ }
+ }
+
+ return nil
+}
+
+// Stop is used to prevent further automatic revocations.
+// This must be called before sealing the view.
+func (m *ExpirationManager) Stop() error {
+ // Stop all the pending expiration timers
+ m.pendingLock.Lock()
+ for _, timer := range m.pending {
+ timer.Stop()
+ }
+ m.pending = make(map[string]*time.Timer)
+ m.pendingLock.Unlock()
+ return nil
+}
+
+// Revoke is used to revoke a secret named by the given LeaseID
+func (m *ExpirationManager) Revoke(leaseID string) error {
+ defer metrics.MeasureSince([]string{"expire", "revoke"}, time.Now())
+
+ return m.revokeCommon(leaseID, false, false)
+}
+
+// revokeCommon does the heavy lifting. If force is true, we ignore a problem
+// during revocation and still remove entries/index/lease timers. If skipToken
+// is true, backend revocation is skipped for auth (token) leases.
+func (m *ExpirationManager) revokeCommon(leaseID string, force, skipToken bool) error {
+ defer metrics.MeasureSince([]string{"expire", "revoke-common"}, time.Now())
+ // Load the entry
+ le, err := m.loadEntry(leaseID)
+ if err != nil {
+ return err
+ }
+
+ // If there is no entry, nothing to revoke
+ if le == nil {
+ return nil
+ }
+
+ // Revoke the entry
+ if !skipToken || le.Auth == nil {
+ if err := m.revokeEntry(le); err != nil {
+ if !force {
+ return err
+ }
+ if m.logger.IsWarn() {
+ m.logger.Warn("revocation from the backend failed, but in force mode so ignoring", "error", err)
+ }
+ }
+ }
+
+ // Delete the entry
+ if err := m.deleteEntry(leaseID); err != nil {
+ return err
+ }
+
+ // Delete the secondary index, but only if it's a leased secret (not auth)
+ if le.Secret != nil {
+ if err := m.removeIndexByToken(le.ClientToken, le.LeaseID); err != nil {
+ return err
+ }
+ }
+
+ // Clear the expiration handler
+ m.pendingLock.Lock()
+ if timer, ok := m.pending[leaseID]; ok {
+ timer.Stop()
+ delete(m.pending, leaseID)
+ }
+ m.pendingLock.Unlock()
+ return nil
+}
+
+// RevokeForce works similarly to RevokePrefix but continues in the case of a
+// revocation error; this is mostly meant for recovery operations
+func (m *ExpirationManager) RevokeForce(prefix string) error {
+ defer metrics.MeasureSince([]string{"expire", "revoke-force"}, time.Now())
+
+ return m.revokePrefixCommon(prefix, true)
+}
+
+// RevokePrefix is used to revoke all secrets with a given prefix.
+// The prefix maps to that of the mount table to make this simpler
+// to reason about.
+func (m *ExpirationManager) RevokePrefix(prefix string) error {
+ defer metrics.MeasureSince([]string{"expire", "revoke-prefix"}, time.Now())
+
+ return m.revokePrefixCommon(prefix, false)
+}
+
+// RevokeByToken is used to revoke all the secrets issued with a given token.
+// This is done by using the secondary index. It also removes the lease entry
+// for the token itself. As a result it should *ONLY* ever be called from the
+// token store's revokeSalted function.
+func (m *ExpirationManager) RevokeByToken(te *TokenEntry) error {
+ defer metrics.MeasureSince([]string{"expire", "revoke-by-token"}, time.Now())
+ // Lookup the leases
+ existing, err := m.lookupByToken(te.ID)
+ if err != nil {
+ return fmt.Errorf("failed to scan for leases: %v", err)
+ }
+
+ // Revoke all the keys
+ for idx, leaseID := range existing {
+ if err := m.Revoke(leaseID); err != nil {
+ return fmt.Errorf("failed to revoke '%s' (%d / %d): %v",
+ leaseID, idx+1, len(existing), err)
+ }
+ }
+
+ if te.Path != "" {
+ tokenLeaseID := path.Join(te.Path, m.tokenStore.SaltID(te.ID))
+
+ // We want to skip the revokeEntry call as that will call back into
+ // revocation logic in the token store, which is what is running this
+ // function in the first place -- it'd be a deadlock loop. Since the only
+ // place that this function is called is revokeSalted in the token store,
+ // we're already revoking the token, so we just want to clean up the lease.
+ // This avoids spurious revocation errors showing up in the log later when
+ // the timer runs out, and eases up resource usage.
+ return m.revokeCommon(tokenLeaseID, false, true)
+ }
+
+ return nil
+}
+
+func (m *ExpirationManager) revokePrefixCommon(prefix string, force bool) error {
+ // Ensure there is a trailing slash
+ if !strings.HasSuffix(prefix, "/") {
+ prefix = prefix + "/"
+ }
+
+ // Accumulate existing leases
+ sub := m.idView.SubView(prefix)
+ existing, err := logical.CollectKeys(sub)
+ if err != nil {
+ return fmt.Errorf("failed to scan for leases: %v", err)
+ }
+
+ // Revoke all the keys
+ for idx, suffix := range existing {
+ leaseID := prefix + suffix
+ if err := m.revokeCommon(leaseID, force, false); err != nil {
+ return fmt.Errorf("failed to revoke '%s' (%d / %d): %v",
+ leaseID, idx+1, len(existing), err)
+ }
+ }
+ return nil
+}
+
+// Renew is used to renew a secret using the given leaseID
+// and a renew interval. The increment may be ignored.
+func (m *ExpirationManager) Renew(leaseID string, increment time.Duration) (*logical.Response, error) {
+ defer metrics.MeasureSince([]string{"expire", "renew"}, time.Now())
+ // Load the entry
+ le, err := m.loadEntry(leaseID)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check if the lease is renewable
+ if _, err := le.renewable(); err != nil {
+ return nil, err
+ }
+
+ // Attempt to renew the entry
+ resp, err := m.renewEntry(le, increment)
+ if err != nil {
+ return nil, err
+ }
+
+ // Fast-path if there is no lease
+ if resp == nil || resp.Secret == nil || !resp.Secret.LeaseEnabled() {
+ return resp, nil
+ }
+
+ // Validate the lease
+ if err := resp.Secret.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Attach the LeaseID
+ resp.Secret.LeaseID = leaseID
+
+ // Update the lease entry
+ le.Data = resp.Data
+ le.Secret = resp.Secret
+ le.ExpireTime = resp.Secret.ExpirationTime()
+ le.LastRenewalTime = time.Now()
+ if err := m.persistEntry(le); err != nil {
+ return nil, err
+ }
+
+ // Update the expiration time
+ m.updatePending(le, resp.Secret.LeaseTotal())
+
+ // Return the response
+ return resp, nil
+}
+
+// RenewToken is used to renew a token which does not need to
+// invoke a logical backend.
+func (m *ExpirationManager) RenewToken(req *logical.Request, source string, token string,
+ increment time.Duration) (*logical.Response, error) {
+ defer metrics.MeasureSince([]string{"expire", "renew-token"}, time.Now())
+ // Compute the Lease ID
+ leaseID := path.Join(source, m.tokenStore.SaltID(token))
+
+ // Load the entry
+ le, err := m.loadEntry(leaseID)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check if the lease is renewable. Note that this also checks for a nil
+ // lease and errors in that case as well.
+ if _, err := le.renewable(); err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+
+ // Attempt to renew the auth entry
+ resp, err := m.renewAuthEntry(req, le, increment)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp == nil {
+ return nil, nil
+ }
+
+ if resp.IsError() {
+ return &logical.Response{
+ Data: resp.Data,
+ }, nil
+ }
+
+ if resp.Auth == nil || !resp.Auth.LeaseEnabled() {
+ return &logical.Response{
+ Auth: resp.Auth,
+ }, nil
+ }
+
+ // Attach the ClientToken
+ resp.Auth.ClientToken = token
+ resp.Auth.Increment = 0
+
+ // Update the lease entry
+ le.Auth = resp.Auth
+ le.ExpireTime = resp.Auth.ExpirationTime()
+ le.LastRenewalTime = time.Now()
+ if err := m.persistEntry(le); err != nil {
+ return nil, err
+ }
+
+ // Update the expiration time
+ m.updatePending(le, resp.Auth.LeaseTotal())
+ return &logical.Response{
+ Auth: resp.Auth,
+ }, nil
+}
+
+// Register is used to take a request and response with an associated
+// lease. The secret gets assigned a LeaseID and the management of
+// the lease is assumed by the expiration manager.
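+// For example, registering the response to a read of "prod/aws/foo" yields
+// a LeaseID of the form "prod/aws/foo/<uuid>" (hypothetical path shown).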
+func (m *ExpirationManager) Register(req *logical.Request, resp *logical.Response) (string, error) {
+ defer metrics.MeasureSince([]string{"expire", "register"}, time.Now())
+ // Ignore if there is no leased secret
+ if resp == nil || resp.Secret == nil {
+ return "", nil
+ }
+
+ // Validate the secret
+ if err := resp.Secret.Validate(); err != nil {
+ return "", err
+ }
+
+ // Create a lease entry
+ leaseUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ return "", err
+ }
+ le := leaseEntry{
+ LeaseID: path.Join(req.Path, leaseUUID),
+ ClientToken: req.ClientToken,
+ Path: req.Path,
+ Data: resp.Data,
+ Secret: resp.Secret,
+ IssueTime: time.Now(),
+ ExpireTime: resp.Secret.ExpirationTime(),
+ }
+
+ // Encode the entry
+ if err := m.persistEntry(&le); err != nil {
+ return "", err
+ }
+
+ // Maintain secondary index by token
+ if err := m.createIndexByToken(le.ClientToken, le.LeaseID); err != nil {
+ return "", err
+ }
+
+ // Setup revocation timer if there is a lease
+ m.updatePending(&le, resp.Secret.LeaseTotal())
+
+ // Done
+ return le.LeaseID, nil
+}
+
+// RegisterAuth is used to take an Auth response with an associated lease.
+// The token does not get a LeaseID, but the lease management is handled by
+// the expiration manager.
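+// Internally the lease is tracked under path.Join(source, SaltID(token)),
+// e.g. "auth/github/login/<salted-token>" (illustrative value).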
+func (m *ExpirationManager) RegisterAuth(source string, auth *logical.Auth) error {
+ defer metrics.MeasureSince([]string{"expire", "register-auth"}, time.Now())
+
+ // Create a lease entry
+ le := leaseEntry{
+ LeaseID: path.Join(source, m.tokenStore.SaltID(auth.ClientToken)),
+ ClientToken: auth.ClientToken,
+ Auth: auth,
+ Path: source,
+ IssueTime: time.Now(),
+ ExpireTime: auth.ExpirationTime(),
+ }
+
+ // Encode the entry
+ if err := m.persistEntry(&le); err != nil {
+ return err
+ }
+
+ // Setup revocation timer
+ m.updatePending(&le, auth.LeaseTotal())
+ return nil
+}
+
+// FetchLeaseTimesByToken is a helper function to use token values to compute
+// the leaseID, rather than pushing that logic back into the token store.
+func (m *ExpirationManager) FetchLeaseTimesByToken(source, token string) (*leaseEntry, error) {
+ defer metrics.MeasureSince([]string{"expire", "fetch-lease-times-by-token"}, time.Now())
+
+ // Compute the Lease ID
+ leaseID := path.Join(source, m.tokenStore.SaltID(token))
+ return m.FetchLeaseTimes(leaseID)
+}
+
+// FetchLeaseTimes is used to fetch the issue time, expiration time, and last
+// renewed time of a lease entry. It returns a leaseEntry itself, but with only
+// those values copied over.
+func (m *ExpirationManager) FetchLeaseTimes(leaseID string) (*leaseEntry, error) {
+ defer metrics.MeasureSince([]string{"expire", "fetch-lease-times"}, time.Now())
+
+ // Load the entry
+ le, err := m.loadEntry(leaseID)
+ if err != nil {
+ return nil, err
+ }
+ if le == nil {
+ return nil, nil
+ }
+
+ ret := &leaseEntry{
+ IssueTime: le.IssueTime,
+ ExpireTime: le.ExpireTime,
+ LastRenewalTime: le.LastRenewalTime,
+ }
+ if le.Secret != nil {
+ ret.Secret = &logical.Secret{}
+ ret.Secret.Renewable = le.Secret.Renewable
+ ret.Secret.TTL = le.Secret.TTL
+ }
+ if le.Auth != nil {
+ ret.Auth = &logical.Auth{}
+ ret.Auth.Renewable = le.Auth.Renewable
+ ret.Auth.TTL = le.Auth.TTL
+ }
+
+ return ret, nil
+}
+
+// updatePending is used to update a pending invocation for a lease
+func (m *ExpirationManager) updatePending(le *leaseEntry, leaseTotal time.Duration) {
+ m.pendingLock.Lock()
+ defer m.pendingLock.Unlock()
+
+ // Check for an existing timer
+ timer, ok := m.pending[le.LeaseID]
+
+ // Create entry if it does not exist
+ if !ok && leaseTotal > 0 {
+ timer := time.AfterFunc(leaseTotal, func() {
+ m.expireID(le.LeaseID)
+ })
+ m.pending[le.LeaseID] = timer
+ return
+ }
+
+ // Delete the timer if the expiration time is zero
+ if ok && leaseTotal == 0 {
+ timer.Stop()
+ delete(m.pending, le.LeaseID)
+ return
+ }
+
+ // Extend the timer by the lease total
+ if ok && leaseTotal > 0 {
+ timer.Reset(leaseTotal)
+ }
+}
+
+// expireID is invoked when a given ID is expired
+func (m *ExpirationManager) expireID(leaseID string) {
+ // Clear from the pending expiration
+ m.pendingLock.Lock()
+ delete(m.pending, leaseID)
+ m.pendingLock.Unlock()
+
+ for attempt := uint(0); attempt < maxRevokeAttempts; attempt++ {
+ err := m.Revoke(leaseID)
+ if err == nil {
+ if m.logger.IsInfo() {
+ m.logger.Info("expire: revoked lease", "lease_id", leaseID)
+ }
+ return
+ }
+ m.logger.Error("expire: failed to revoke lease", "lease_id", leaseID, "error", err)
+ time.Sleep((1 << attempt) * revokeRetryBase)
+ }
+ m.logger.Error("expire: maximum revoke attempts reached", "lease_id", leaseID)
+}
+
+// revokeEntry is used to attempt revocation of an internal entry
+func (m *ExpirationManager) revokeEntry(le *leaseEntry) error {
+ // Revocation of login tokens is special since we can bypass the
+ // backend and directly interact with the token store
+ if le.Auth != nil {
+ if err := m.tokenStore.RevokeTree(le.Auth.ClientToken); err != nil {
+ return fmt.Errorf("failed to revoke token: %v", err)
+ }
+
+ return nil
+ }
+
+ // Handle standard revocation via backends
+ resp, err := m.router.Route(logical.RevokeRequest(
+ le.Path, le.Secret, le.Data))
+ if err != nil || (resp != nil && resp.IsError()) {
+ return fmt.Errorf("failed to revoke entry: resp:%#v err:%s", resp, err)
+ }
+ return nil
+}
+
+// renewEntry is used to attempt renew of an internal entry
+func (m *ExpirationManager) renewEntry(le *leaseEntry, increment time.Duration) (*logical.Response, error) {
+ secret := *le.Secret
+ secret.IssueTime = le.IssueTime
+ secret.Increment = increment
+ secret.LeaseID = ""
+
+ req := logical.RenewRequest(le.Path, &secret, le.Data)
+ resp, err := m.router.Route(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ return nil, fmt.Errorf("failed to renew entry: resp:%#v err:%s", resp, err)
+ }
+ return resp, nil
+}
+
+// renewAuthEntry is used to attempt renew of an auth entry. Only the token
+// store should get the actual token ID intact.
+func (m *ExpirationManager) renewAuthEntry(req *logical.Request, le *leaseEntry, increment time.Duration) (*logical.Response, error) {
+ auth := *le.Auth
+ auth.IssueTime = le.IssueTime
+ auth.Increment = increment
+ if strings.HasPrefix(le.Path, "auth/token/") {
+ auth.ClientToken = le.ClientToken
+ } else {
+ auth.ClientToken = ""
+ }
+
+ authReq := logical.RenewAuthRequest(le.Path, &auth, nil)
+ authReq.Connection = req.Connection
+ resp, err := m.router.Route(authReq)
+ if err != nil {
+ return nil, fmt.Errorf("failed to renew entry: %v", err)
+ }
+ return resp, nil
+}
+
+// loadEntry is used to read a lease entry
+func (m *ExpirationManager) loadEntry(leaseID string) (*leaseEntry, error) {
+ out, err := m.idView.Get(leaseID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read lease entry: %v", err)
+ }
+ if out == nil {
+ return nil, nil
+ }
+ le, err := decodeLeaseEntry(out.Value)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode lease entry: %v", err)
+ }
+ return le, nil
+}
+
+// persistEntry is used to persist a lease entry
+func (m *ExpirationManager) persistEntry(le *leaseEntry) error {
+ // Encode the entry
+ buf, err := le.encode()
+ if err != nil {
+ return fmt.Errorf("failed to encode lease entry: %v", err)
+ }
+
+ // Write out to the view
+ ent := logical.StorageEntry{
+ Key: le.LeaseID,
+ Value: buf,
+ }
+ if err := m.idView.Put(&ent); err != nil {
+ return fmt.Errorf("failed to persist lease entry: %v", err)
+ }
+ return nil
+}
+
+// deleteEntry is used to delete a lease entry
+func (m *ExpirationManager) deleteEntry(leaseID string) error {
+ if err := m.idView.Delete(leaseID); err != nil {
+ return fmt.Errorf("failed to delete lease entry: %v", err)
+ }
+ return nil
+}
+
+// createIndexByToken creates a secondary index from the token to a lease entry
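+// The key layout is "<salt(token)>/<salt(leaseID)>", which is what lets
+// lookupByToken list all of a token's leases by prefix.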
+func (m *ExpirationManager) createIndexByToken(token, leaseID string) error {
+ ent := logical.StorageEntry{
+ Key: m.tokenStore.SaltID(token) + "/" + m.tokenStore.SaltID(leaseID),
+ Value: []byte(leaseID),
+ }
+ if err := m.tokenView.Put(&ent); err != nil {
+ return fmt.Errorf("failed to persist lease index entry: %v", err)
+ }
+ return nil
+}
+
+// indexByToken looks up the secondary index from the token to a lease entry
+func (m *ExpirationManager) indexByToken(token, leaseID string) (*logical.StorageEntry, error) {
+ key := m.tokenStore.SaltID(token) + "/" + m.tokenStore.SaltID(leaseID)
+ entry, err := m.tokenView.Get(key)
+ if err != nil {
+ return nil, fmt.Errorf("failed to look up secondary index entry")
+ }
+ return entry, nil
+}
+
+// removeIndexByToken removes the secondary index from the token to a lease entry
+func (m *ExpirationManager) removeIndexByToken(token, leaseID string) error {
+ key := m.tokenStore.SaltID(token) + "/" + m.tokenStore.SaltID(leaseID)
+ if err := m.tokenView.Delete(key); err != nil {
+ return fmt.Errorf("failed to delete lease index entry: %v", err)
+ }
+ return nil
+}
+
+// lookupByToken is used to look up all the lease IDs for a token via the
+// secondary index
+func (m *ExpirationManager) lookupByToken(token string) ([]string, error) {
+ // Scan via the index for sub-leases
+ prefix := m.tokenStore.SaltID(token) + "/"
+ subKeys, err := m.tokenView.List(prefix)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list leases: %v", err)
+ }
+
+ // Read each index entry
+ leaseIDs := make([]string, 0, len(subKeys))
+ for _, sub := range subKeys {
+ out, err := m.tokenView.Get(prefix + sub)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read lease index: %v", err)
+ }
+ if out == nil {
+ continue
+ }
+ leaseIDs = append(leaseIDs, string(out.Value))
+ }
+ return leaseIDs, nil
+}
+
+// emitMetrics is invoked periodically to emit statistics
+func (m *ExpirationManager) emitMetrics() {
+ m.pendingLock.Lock()
+ num := len(m.pending)
+ m.pendingLock.Unlock()
+ metrics.SetGauge([]string{"expire", "num_leases"}, float32(num))
+}
+
+// leaseEntry is used to structure the values the expiration
+// manager stores. This is used to handle renew and revocation.
+type leaseEntry struct {
+ LeaseID string `json:"lease_id"`
+ ClientToken string `json:"client_token"`
+ Path string `json:"path"`
+ Data map[string]interface{} `json:"data"`
+ Secret *logical.Secret `json:"secret"`
+ Auth *logical.Auth `json:"auth"`
+ IssueTime time.Time `json:"issue_time"`
+ ExpireTime time.Time `json:"expire_time"`
+ LastRenewalTime time.Time `json:"last_renewal_time"`
+}
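+
+// An encoded leaseEntry is plain JSON. An abbreviated, hypothetical example
+// of the persisted form:
+//
+// {
+//   "lease_id": "prod/aws/foo/<uuid>",
+//   "client_token": "<token>",
+//   "path": "prod/aws/foo",
+//   "issue_time": "2017-06-01T00:00:00Z",
+//   "expire_time": "2017-06-01T01:00:00Z"
+// }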
+
+// encode is used to JSON encode the lease entry
+func (le *leaseEntry) encode() ([]byte, error) {
+ return json.Marshal(le)
+}
+
+func (le *leaseEntry) renewable() (bool, error) {
+ var err error
+ switch {
+ // If there is no entry, it cannot be renewed
+ case le == nil || le.ExpireTime.IsZero():
+ err = fmt.Errorf("lease not found or lease is not renewable")
+ // Determine if the lease is expired
+ case le.ExpireTime.Before(time.Now()):
+ err = fmt.Errorf("lease expired")
+ // Determine if the lease is renewable
+ case le.Secret != nil && !le.Secret.Renewable:
+ err = fmt.Errorf("lease is not renewable")
+ case le.Auth != nil && !le.Auth.Renewable:
+ err = fmt.Errorf("lease is not renewable")
+ }
+
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+func (le *leaseEntry) ttl() int64 {
+ return int64(le.ExpireTime.Sub(time.Now().Round(time.Second)).Seconds())
+}
+
+// decodeLeaseEntry is used to reverse the encoding and return a new entry
+func decodeLeaseEntry(buf []byte) (*leaseEntry, error) {
+ out := new(leaseEntry)
+ return out, jsonutil.DecodeJSON(buf, out)
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/expiration_test.go b/vendor/github.com/hashicorp/vault/vault/expiration_test.go
new file mode 100644
index 0000000..ced6b42
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/expiration_test.go
@@ -0,0 +1,1211 @@
+package vault
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/hashicorp/vault/physical"
+ log "github.com/mgutz/logxi/v1"
+)
+
+var (
+ testImagePull sync.Once
+)
+
+// mockExpiration returns a mock expiration manager
+func mockExpiration(t testing.TB) *ExpirationManager {
+ _, ts, _, _ := TestCoreWithTokenStore(t)
+ return ts.expiration
+}
+
+func mockBackendExpiration(t testing.TB, backend physical.Backend) (*Core, *ExpirationManager) {
+ c, ts, _, _ := TestCoreWithBackendTokenStore(t, backend)
+ return c, ts.expiration
+}
+
+func BenchmarkExpiration_Restore_Etcd(b *testing.B) {
+ addr := os.Getenv("PHYSICAL_BACKEND_BENCHMARK_ADDR")
+ randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ physicalBackend, err := physical.NewBackend("etcd", logger, map[string]string{
+ "address": addr,
+ "path": randPath,
+ "max_parallel": "256",
+ })
+ if err != nil {
+ b.Fatalf("err: %s", err)
+ }
+
+ benchmarkExpirationBackend(b, physicalBackend, 10000) // 10,000 leases
+}
+
+func BenchmarkExpiration_Restore_Consul(b *testing.B) {
+ addr := os.Getenv("PHYSICAL_BACKEND_BENCHMARK_ADDR")
+ randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ physicalBackend, err := physical.NewBackend("consul", logger, map[string]string{
+ "address": addr,
+ "path": randPath,
+ "max_parallel": "256",
+ })
+ if err != nil {
+ b.Fatalf("err: %s", err)
+ }
+
+ benchmarkExpirationBackend(b, physicalBackend, 10000) // 10,000 leases
+}
+
+func BenchmarkExpiration_Restore_InMem(b *testing.B) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ benchmarkExpirationBackend(b, physical.NewInmem(logger), 100000) // 100,000 leases
+}
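+
+// The etcd and consul benchmarks above need a reachable backend whose address
+// comes from PHYSICAL_BACKEND_BENCHMARK_ADDR. An illustrative invocation
+// (address and package path are examples):
+//
+// PHYSICAL_BACKEND_BENCHMARK_ADDR=127.0.0.1:8500 \
+//   go test -run=NONE -bench=BenchmarkExpiration_Restore_Consul ./vault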
+
+func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend, numLeases int) {
+ c, exp := mockBackendExpiration(b, physicalBackend)
+ noop := &NoopBackend{}
+ view := NewBarrierView(c.barrier, "logical/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ b.Fatal(err)
+ }
+ exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+
+ // Register fake leases
+ for i := 0; i < numLeases; i++ {
+ pathUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/" + pathUUID,
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 400 * time.Second,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "xyz",
+ "secret_key": "abcd",
+ },
+ }
+ _, err = exp.Register(req, resp)
+ if err != nil {
+ b.Fatalf("err: %v", err)
+ }
+ }
+
+ // Stop everything
+ err = exp.Stop()
+ if err != nil {
+ b.Fatalf("err: %v", err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ // Restore
+ err = exp.Restore()
+ if err != nil {
+ b.Fatalf("err: %v", err)
+ }
+ }
+ b.StopTimer()
+}
+
+func TestExpiration_Restore(t *testing.T) {
+ exp := mockExpiration(t)
+ noop := &NoopBackend{}
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+
+ paths := []string{
+ "prod/aws/foo",
+ "prod/aws/sub/bar",
+ "prod/aws/zip",
+ }
+ for _, path := range paths {
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: path,
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 20 * time.Millisecond,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "xyz",
+ "secret_key": "abcd",
+ },
+ }
+ _, err := exp.Register(req, resp)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+
+ // Stop everything
+ err = exp.Stop()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Restore
+ err = exp.Restore()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Ensure all are reaped
+ start := time.Now()
+ for time.Now().Sub(start) < time.Second {
+ noop.Lock()
+ less := len(noop.Requests) < 3
+ noop.Unlock()
+
+ if less {
+ time.Sleep(5 * time.Millisecond)
+ continue
+ }
+ break
+ }
+ for _, req := range noop.Requests {
+ if req.Operation != logical.RevokeOperation {
+ t.Fatalf("Bad: %v", req)
+ }
+ }
+}
+
+func TestExpiration_Register(t *testing.T) {
+ exp := mockExpiration(t)
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "xyz",
+ "secret_key": "abcd",
+ },
+ }
+
+ id, err := exp.Register(req, resp)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if !strings.HasPrefix(id, req.Path) {
+ t.Fatalf("bad: %s", id)
+ }
+
+ if len(id) <= len(req.Path) {
+ t.Fatalf("bad: %s", id)
+ }
+}
+
+func TestExpiration_RegisterAuth(t *testing.T) {
+ exp := mockExpiration(t)
+ root, err := exp.tokenStore.rootToken()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ auth := &logical.Auth{
+ ClientToken: root.ID,
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ },
+ }
+
+ err = exp.RegisterAuth("auth/github/login", auth)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestExpiration_RegisterAuth_NoLease(t *testing.T) {
+ exp := mockExpiration(t)
+ root, err := exp.tokenStore.rootToken()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ auth := &logical.Auth{
+ ClientToken: root.ID,
+ }
+
+ err = exp.RegisterAuth("auth/github/login", auth)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should not be able to renew, no expiration
+ resp, err := exp.RenewToken(&logical.Request{}, "auth/github/login", root.ID, 0)
+ if err != nil && (err != logical.ErrInvalidRequest || (resp != nil && resp.IsError() && resp.Error().Error() != "lease not found or lease is not renewable")) {
+ t.Fatalf("bad: err:%v resp:%#v", err, resp)
+ }
+ if resp == nil {
+ t.Fatal("expected a response")
+ }
+
+ // Wait and check token is not invalidated
+ time.Sleep(20 * time.Millisecond)
+
+ // Verify token does not get revoked
+ out, err := exp.tokenStore.Lookup(root.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("missing token")
+ }
+}
+
+func TestExpiration_Revoke(t *testing.T) {
+ exp := mockExpiration(t)
+ noop := &NoopBackend{}
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "xyz",
+ "secret_key": "abcd",
+ },
+ }
+
+ id, err := exp.Register(req, resp)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if err := exp.Revoke(id); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = noop.Requests[0]
+ if req.Operation != logical.RevokeOperation {
+ t.Fatalf("Bad: %v", req)
+ }
+}
+
+func TestExpiration_RevokeOnExpire(t *testing.T) {
+ exp := mockExpiration(t)
+ noop := &NoopBackend{}
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 20 * time.Millisecond,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "xyz",
+ "secret_key": "abcd",
+ },
+ }
+
+ _, err = exp.Register(req, resp)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ start := time.Now()
+ for time.Now().Sub(start) < time.Second {
+ req = nil
+
+ noop.Lock()
+ if len(noop.Requests) > 0 {
+ req = noop.Requests[0]
+ }
+ noop.Unlock()
+ if req == nil {
+ time.Sleep(5 * time.Millisecond)
+ continue
+ }
+ if req.Operation != logical.RevokeOperation {
+ t.Fatalf("Bad: %v", req)
+ }
+
+ break
+ }
+}
+
+func TestExpiration_RevokePrefix(t *testing.T) {
+ exp := mockExpiration(t)
+ noop := &NoopBackend{}
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+
+ paths := []string{
+ "prod/aws/foo",
+ "prod/aws/sub/bar",
+ "prod/aws/zip",
+ }
+ for _, path := range paths {
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: path,
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 20 * time.Millisecond,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "xyz",
+ "secret_key": "abcd",
+ },
+ }
+ _, err := exp.Register(req, resp)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+
+ // Should nuke all the keys
+ if err := exp.RevokePrefix("prod/aws/"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(noop.Requests) != 3 {
+ t.Fatalf("Bad: %v", noop.Requests)
+ }
+ for _, req := range noop.Requests {
+ if req.Operation != logical.RevokeOperation {
+ t.Fatalf("Bad: %v", req)
+ }
+ }
+
+ expect := []string{
+ "foo",
+ "sub/bar",
+ "zip",
+ }
+ sort.Strings(noop.Paths)
+ sort.Strings(expect)
+ if !reflect.DeepEqual(noop.Paths, expect) {
+ t.Fatalf("bad: %v", noop.Paths)
+ }
+}
+
+func TestExpiration_RevokeByToken(t *testing.T) {
+ exp := mockExpiration(t)
+ noop := &NoopBackend{}
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+
+ paths := []string{
+ "prod/aws/foo",
+ "prod/aws/sub/bar",
+ "prod/aws/zip",
+ }
+ for _, path := range paths {
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: path,
+ ClientToken: "foobarbaz",
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 20 * time.Millisecond,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "xyz",
+ "secret_key": "abcd",
+ },
+ }
+ _, err := exp.Register(req, resp)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+
+ // Should nuke all the keys
+ te := &TokenEntry{
+ ID: "foobarbaz",
+ }
+ if err := exp.RevokeByToken(te); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(noop.Requests) != 3 {
+ t.Fatalf("Bad: %v", noop.Requests)
+ }
+ for _, req := range noop.Requests {
+ if req.Operation != logical.RevokeOperation {
+ t.Fatalf("Bad: %v", req)
+ }
+ }
+
+ expect := []string{
+ "foo",
+ "sub/bar",
+ "zip",
+ }
+ sort.Strings(noop.Paths)
+ sort.Strings(expect)
+ if !reflect.DeepEqual(noop.Paths, expect) {
+ t.Fatalf("bad: %v", noop.Paths)
+ }
+}
+
+func TestExpiration_RenewToken(t *testing.T) {
+ exp := mockExpiration(t)
+ root, err := exp.tokenStore.rootToken()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Register a token
+ auth := &logical.Auth{
+ ClientToken: root.ID,
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ Renewable: true,
+ },
+ }
+ err = exp.RegisterAuth("auth/token/login", auth)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Renew the token
+ out, err := exp.RenewToken(&logical.Request{}, "auth/token/login", root.ID, 0)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if auth.ClientToken != out.Auth.ClientToken {
+ t.Fatalf("Bad: %#v", out)
+ }
+}
+
+func TestExpiration_RenewToken_NotRenewable(t *testing.T) {
+ exp := mockExpiration(t)
+ root, err := exp.tokenStore.rootToken()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Register a token
+ auth := &logical.Auth{
+ ClientToken: root.ID,
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ Renewable: false,
+ },
+ }
+ err = exp.RegisterAuth("auth/github/login", auth)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Attempt to renew the token
+ resp, err := exp.RenewToken(&logical.Request{}, "auth/github/login", root.ID, 0)
+ if err != nil && (err != logical.ErrInvalidRequest || (resp != nil && resp.IsError() && resp.Error().Error() != "lease is not renewable")) {
+ t.Fatalf("bad: err:%v resp:%#v", err, resp)
+ }
+ if resp == nil {
+ t.Fatal("expected a response")
+ }
+
+}
+
+func TestExpiration_Renew(t *testing.T) {
+ exp := mockExpiration(t)
+ noop := &NoopBackend{}
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 20 * time.Millisecond,
+ Renewable: true,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "xyz",
+ "secret_key": "abcd",
+ },
+ }
+
+ id, err := exp.Register(req, resp)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ noop.Response = &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 20 * time.Millisecond,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "123",
+ "secret_key": "abcd",
+ },
+ }
+
+ out, err := exp.Renew(id, 0)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ noop.Lock()
+ defer noop.Unlock()
+
+ if !reflect.DeepEqual(out, noop.Response) {
+ t.Fatalf("Bad: %#v", out)
+ }
+
+ if len(noop.Requests) != 1 {
+ t.Fatalf("Bad: %#v", noop.Requests)
+ }
+ req = noop.Requests[0]
+ if req.Operation != logical.RenewOperation {
+ t.Fatalf("Bad: %v", req)
+ }
+}
+
+func TestExpiration_Renew_NotRenewable(t *testing.T) {
+ exp := mockExpiration(t)
+ noop := &NoopBackend{}
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 20 * time.Millisecond,
+ Renewable: false,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "xyz",
+ "secret_key": "abcd",
+ },
+ }
+
+ id, err := exp.Register(req, resp)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ _, err = exp.Renew(id, 0)
+ if err.Error() != "lease is not renewable" {
+ t.Fatalf("err: %v", err)
+ }
+
+ noop.Lock()
+ defer noop.Unlock()
+
+ if len(noop.Requests) != 0 {
+ t.Fatalf("Bad: %#v", noop.Requests)
+ }
+}
+
+func TestExpiration_Renew_RevokeOnExpire(t *testing.T) {
+ exp := mockExpiration(t)
+ noop := &NoopBackend{}
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 20 * time.Millisecond,
+ Renewable: true,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "xyz",
+ "secret_key": "abcd",
+ },
+ }
+
+ id, err := exp.Register(req, resp)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ noop.Response = &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 20 * time.Millisecond,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "123",
+ "secret_key": "abcd",
+ },
+ }
+
+ _, err = exp.Renew(id, 0)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ start := time.Now()
+ for time.Now().Sub(start) < time.Second {
+ req = nil
+
+ noop.Lock()
+ if len(noop.Requests) >= 2 {
+ req = noop.Requests[1]
+ }
+ noop.Unlock()
+
+ if req == nil {
+ time.Sleep(5 * time.Millisecond)
+ continue
+ }
+ if req.Operation != logical.RevokeOperation {
+ t.Fatalf("Bad: %v", req)
+ }
+ break
+ }
+}
+
+func TestExpiration_revokeEntry(t *testing.T) {
+ exp := mockExpiration(t)
+
+ noop := &NoopBackend{}
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp.router.Mount(noop, "", &MountEntry{UUID: meUUID}, view)
+
+ le := &leaseEntry{
+ LeaseID: "foo/bar/1234",
+ Path: "foo/bar",
+ Data: map[string]interface{}{
+ "testing": true,
+ },
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Minute,
+ },
+ },
+ IssueTime: time.Now(),
+ ExpireTime: time.Now(),
+ }
+
+ err = exp.revokeEntry(le)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ noop.Lock()
+ defer noop.Unlock()
+
+ req := noop.Requests[0]
+ if req.Operation != logical.RevokeOperation {
+ t.Fatalf("Bad: %v", req)
+ }
+ if req.Path != le.Path {
+ t.Fatalf("Bad: %v", req)
+ }
+ if !reflect.DeepEqual(req.Data, le.Data) {
+ t.Fatalf("Bad: %v", req)
+ }
+}
+
+func TestExpiration_revokeEntry_token(t *testing.T) {
+ exp := mockExpiration(t)
+ root, err := exp.tokenStore.rootToken()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // N.B.: Vault doesn't allow both a secret and auth to be returned, but both
+ // are set here for a reason. The auth is needed to exercise the token store,
+ // as it's the only mounted backend; the Secret is needed because RegisterAuth
+ // doesn't create the index by token, only Register (for a Secret) does.
+ // Without the Secret, removing the index would do nothing, which (at the
+ // time of writing) now fails: a bug causing every token expiration to do an
+ // extra delete to a non-existent key has been fixed, and this test relies on
+ // that nonstandard behavior.
+ le := &leaseEntry{
+ LeaseID: "foo/bar/1234",
+ Auth: &logical.Auth{
+ ClientToken: root.ID,
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Minute,
+ },
+ },
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Minute,
+ },
+ },
+ ClientToken: root.ID,
+ Path: "foo/bar",
+ IssueTime: time.Now(),
+ ExpireTime: time.Now(),
+ }
+
+ if err := exp.persistEntry(le); err != nil {
+ t.Fatalf("error persisting entry: %v", err)
+ }
+ if err := exp.createIndexByToken(le.ClientToken, le.LeaseID); err != nil {
+ t.Fatalf("error creating secondary index: %v", err)
+ }
+ exp.updatePending(le, le.Secret.LeaseTotal())
+
+ indexEntry, err := exp.indexByToken(le.ClientToken, le.LeaseID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if indexEntry == nil {
+ t.Fatalf("err: should have found a secondary index entry")
+ }
+
+ err = exp.revokeEntry(le)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ out, err := exp.tokenStore.Lookup(le.ClientToken)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ indexEntry, err = exp.indexByToken(le.ClientToken, le.LeaseID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if indexEntry != nil {
+ t.Fatalf("err: should not have found a secondary index entry")
+ }
+}
+
+func TestExpiration_renewEntry(t *testing.T) {
+ exp := mockExpiration(t)
+
+ noop := &NoopBackend{
+ Response: &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ TTL: time.Hour,
+ },
+ },
+ Data: map[string]interface{}{
+ "testing": false,
+ },
+ },
+ }
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp.router.Mount(noop, "", &MountEntry{UUID: meUUID}, view)
+
+ le := &leaseEntry{
+ LeaseID: "foo/bar/1234",
+ Path: "foo/bar",
+ Data: map[string]interface{}{
+ "testing": true,
+ },
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Minute,
+ },
+ },
+ IssueTime: time.Now(),
+ ExpireTime: time.Now(),
+ }
+
+ resp, err := exp.renewEntry(le, time.Second)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ noop.Lock()
+ defer noop.Unlock()
+
+ if !reflect.DeepEqual(resp, noop.Response) {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req := noop.Requests[0]
+ if req.Operation != logical.RenewOperation {
+ t.Fatalf("Bad: %v", req)
+ }
+ if req.Path != le.Path {
+ t.Fatalf("Bad: %v", req)
+ }
+ if !reflect.DeepEqual(req.Data, le.Data) {
+ t.Fatalf("Bad: %v", req)
+ }
+ if req.Secret.Increment != time.Second {
+ t.Fatalf("Bad: %v", req)
+ }
+ if req.Secret.IssueTime.IsZero() {
+ t.Fatalf("Bad: %v", req)
+ }
+}
+
+func TestExpiration_renewAuthEntry(t *testing.T) {
+ exp := mockExpiration(t)
+
+ noop := &NoopBackend{
+ Response: &logical.Response{
+ Auth: &logical.Auth{
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ TTL: time.Hour,
+ },
+ },
+ },
+ }
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "auth/foo/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp.router.Mount(noop, "auth/foo/", &MountEntry{UUID: meUUID}, view)
+
+ le := &leaseEntry{
+ LeaseID: "auth/foo/1234",
+ Path: "auth/foo/login",
+ Auth: &logical.Auth{
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ TTL: time.Minute,
+ },
+ InternalData: map[string]interface{}{
+ "MySecret": "secret",
+ },
+ },
+ IssueTime: time.Now(),
+ ExpireTime: time.Now().Add(time.Minute),
+ }
+
+ resp, err := exp.renewAuthEntry(&logical.Request{}, le, time.Second)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ noop.Lock()
+ defer noop.Unlock()
+
+ if !reflect.DeepEqual(resp, noop.Response) {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req := noop.Requests[0]
+ if req.Operation != logical.RenewOperation {
+ t.Fatalf("Bad: %v", req)
+ }
+ if req.Path != "login" {
+ t.Fatalf("Bad: %v", req)
+ }
+ if req.Auth.Increment != time.Second {
+ t.Fatalf("Bad: %v", req)
+ }
+ if req.Auth.IssueTime.IsZero() {
+ t.Fatalf("Bad: %v", req)
+ }
+ if req.Auth.InternalData["MySecret"] != "secret" {
+ t.Fatalf("Bad: %v", req)
+ }
+}
+
+func TestExpiration_PersistLoadDelete(t *testing.T) {
+ exp := mockExpiration(t)
+ lastTime := time.Now()
+ le := &leaseEntry{
+ LeaseID: "foo/bar/1234",
+ Path: "foo/bar",
+ Data: map[string]interface{}{
+ "testing": true,
+ },
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Minute,
+ },
+ },
+ IssueTime: lastTime,
+ ExpireTime: lastTime,
+ LastRenewalTime: lastTime,
+ }
+ if err := exp.persistEntry(le); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ out, err := exp.loadEntry("foo/bar/1234")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !le.LastRenewalTime.Equal(out.LastRenewalTime) ||
+ !le.IssueTime.Equal(out.IssueTime) ||
+ !le.ExpireTime.Equal(out.ExpireTime) {
+ t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", le, out)
+ }
+ le.LastRenewalTime = out.LastRenewalTime
+ le.IssueTime = out.IssueTime
+ le.ExpireTime = out.ExpireTime
+ if !reflect.DeepEqual(out, le) {
+ t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", le, out)
+ }
+
+ err = exp.deleteEntry("foo/bar/1234")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ out, err = exp.loadEntry("foo/bar/1234")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("out: %#v", out)
+ }
+}
+
+func TestLeaseEntry(t *testing.T) {
+ le := &leaseEntry{
+ LeaseID: "foo/bar/1234",
+ Path: "foo/bar",
+ Data: map[string]interface{}{
+ "testing": true,
+ },
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Minute,
+ Renewable: true,
+ },
+ },
+ IssueTime: time.Now(),
+ ExpireTime: time.Now(),
+ }
+
+ enc, err := le.encode()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ out, err := decodeLeaseEntry(enc)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if !reflect.DeepEqual(out.Data, le.Data) {
+ t.Fatalf("got: %#v, expect %#v", out, le)
+ }
+
+ // Test renewability
+ le.ExpireTime = time.Time{}
+ if r, _ := le.renewable(); r {
+ t.Fatal("lease with zero expire time is not renewable")
+ }
+ le.ExpireTime = time.Now().Add(-1 * time.Hour)
+ if r, _ := le.renewable(); r {
+ t.Fatal("lease with expire time in the past is not renewable")
+ }
+ le.ExpireTime = time.Now().Add(1 * time.Hour)
+ if r, err := le.renewable(); !r {
+ t.Fatalf("lease with future expire time is renewable, err: %v", err)
+ }
+ le.Secret.LeaseOptions.Renewable = false
+ if r, _ := le.renewable(); r {
+ t.Fatal("secret is set to not be renewable but returns as renewable")
+ }
+ le.Secret = nil
+ le.Auth = &logical.Auth{
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ },
+ }
+ if r, err := le.renewable(); !r {
+ t.Fatalf("auth is renewable but is set to not be, err: %v", err)
+ }
+ le.Auth.LeaseOptions.Renewable = false
+ if r, _ := le.renewable(); r {
+ t.Fatal("auth is set to not be renewable but returns as renewable")
+ }
+}
+
+func TestExpiration_RevokeForce(t *testing.T) {
+ core, _, _, root := TestCoreWithTokenStore(t)
+
+ core.logicalBackends["badrenew"] = badRenewFactory
+ me := &MountEntry{
+ Table: mountTableType,
+ Path: "badrenew/",
+ Type: "badrenew",
+ }
+
+ err := core.mount(me)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "badrenew/creds",
+ ClientToken: root,
+ }
+
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("response was nil")
+ }
+ if resp.Secret == nil {
+ t.Fatalf("response secret was nil, response was %#v", *resp)
+ }
+
+ req.Operation = logical.UpdateOperation
+ req.Path = "sys/revoke-prefix/badrenew/creds"
+
+ resp, err = core.HandleRequest(req)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ req.Path = "sys/revoke-force/badrenew/creds"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("got error: %s", err)
+ }
+}
+
+func badRenewFactory(conf *logical.BackendConfig) (logical.Backend, error) {
+ be := &framework.Backend{
+ Paths: []*framework.Path{
+ &framework.Path{
+ Pattern: "creds",
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: func(*logical.Request, *framework.FieldData) (*logical.Response, error) {
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ InternalData: map[string]interface{}{
+ "secret_type": "badRenewBackend",
+ },
+ },
+ }
+ resp.Secret.TTL = time.Second * 30
+ return resp, nil
+ },
+ },
+ },
+ },
+
+ Secrets: []*framework.Secret{
+ &framework.Secret{
+ Type: "badRenewBackend",
+ Revoke: func(*logical.Request, *framework.FieldData) (*logical.Response, error) {
+ return nil, fmt.Errorf("always errors")
+ },
+ },
+ },
+ }
+
+ return be.Setup(conf)
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/generate_root.go b/vendor/github.com/hashicorp/vault/vault/generate_root.go
new file mode 100644
index 0000000..4278b02
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/generate_root.go
@@ -0,0 +1,325 @@
+package vault
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/pgpkeys"
+ "github.com/hashicorp/vault/helper/xor"
+ "github.com/hashicorp/vault/shamir"
+)
+
+// GenerateRootConfig holds the configuration for a root generation
+// command.
+type GenerateRootConfig struct {
+ Nonce string
+ PGPKey string
+ PGPFingerprint string
+ OTP string
+}
+
+// GenerateRootResult holds the result of a root generation update
+// command
+type GenerateRootResult struct {
+ Progress int
+ Required int
+ EncodedRootToken string
+ PGPFingerprint string
+}
+
+// GenerateRootProgress is used to return the root generation progress (num shares)
+func (c *Core) GenerateRootProgress() (int, error) {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return 0, consts.ErrSealed
+ }
+ if c.standby {
+ return 0, consts.ErrStandby
+ }
+
+ c.generateRootLock.Lock()
+ defer c.generateRootLock.Unlock()
+
+ return len(c.generateRootProgress), nil
+}
+
+// GenerateRootConfiguration is used to read the root generation configuration
+// It stubbornly refuses to return the OTP if one is there.
+func (c *Core) GenerateRootConfiguration() (*GenerateRootConfig, error) {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return nil, consts.ErrSealed
+ }
+ if c.standby {
+ return nil, consts.ErrStandby
+ }
+
+ c.generateRootLock.Lock()
+ defer c.generateRootLock.Unlock()
+
+ // Copy the config if any
+ var conf *GenerateRootConfig
+ if c.generateRootConfig != nil {
+ conf = new(GenerateRootConfig)
+ *conf = *c.generateRootConfig
+ conf.OTP = ""
+ }
+ return conf, nil
+}
+
+// GenerateRootInit is used to initialize the root generation settings
+func (c *Core) GenerateRootInit(otp, pgpKey string) error {
+ var fingerprint string
+ switch {
+ case len(otp) > 0:
+ otpBytes, err := base64.StdEncoding.DecodeString(otp)
+ if err != nil {
+ return fmt.Errorf("error decoding base64 OTP value: %s", err)
+ }
+ if otpBytes == nil || len(otpBytes) != 16 {
+ return fmt.Errorf("decoded OTP value is invalid or wrong length")
+ }
+
+ case len(pgpKey) > 0:
+ fingerprints, err := pgpkeys.GetFingerprints([]string{pgpKey}, nil)
+ if err != nil {
+ return fmt.Errorf("error parsing PGP key: %s", err)
+ }
+ if len(fingerprints) != 1 || fingerprints[0] == "" {
+ return fmt.Errorf("could not acquire PGP key entity")
+ }
+ fingerprint = fingerprints[0]
+
+ default:
+ return fmt.Errorf("unreachable condition")
+ }
+
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return consts.ErrSealed
+ }
+ if c.standby {
+ return consts.ErrStandby
+ }
+
+ c.generateRootLock.Lock()
+ defer c.generateRootLock.Unlock()
+
+ // Prevent multiple concurrent root generations
+ if c.generateRootConfig != nil {
+ return fmt.Errorf("root generation already in progress")
+ }
+
+ // Copy the configuration
+ generationNonce, err := uuid.GenerateUUID()
+ if err != nil {
+ return err
+ }
+
+ c.generateRootConfig = &GenerateRootConfig{
+ Nonce: generationNonce,
+ OTP: otp,
+ PGPKey: pgpKey,
+ PGPFingerprint: fingerprint,
+ }
+
+ if c.logger.IsInfo() {
+ c.logger.Info("core: root generation initialized", "nonce", c.generateRootConfig.Nonce)
+ }
+ return nil
+}
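+
+// exampleGenerateOTP is an illustrative sketch, not part of the upstream
+// API: it builds an OTP that passes the 16-byte check in GenerateRootInit by
+// parsing a random UUID back into its 16 raw bytes and base64-encoding them,
+// using only packages this file already imports.
+func exampleGenerateOTP() (string, error) {
+ otpSource, err := uuid.GenerateUUID()
+ if err != nil {
+ return "", err
+ }
+ // ParseUUID returns the 16 raw bytes backing the UUID string
+ raw, err := uuid.ParseUUID(otpSource)
+ if err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(raw), nil
+}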
+
+// GenerateRootUpdate is used to provide a new key part
+func (c *Core) GenerateRootUpdate(key []byte, nonce string) (*GenerateRootResult, error) {
+ // Verify the key length
+ min, max := c.barrier.KeyLength()
+ max += shamir.ShareOverhead
+ if len(key) < min {
+ return nil, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
+ }
+ if len(key) > max {
+ return nil, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
+ }
+
+ // Get the seal configuration
+ var config *SealConfig
+ var err error
+ if c.seal.RecoveryKeySupported() {
+ config, err = c.seal.RecoveryConfig()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ config, err = c.seal.BarrierConfig()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Ensure the barrier is initialized
+ if config == nil {
+ return nil, ErrNotInit
+ }
+
+ // Ensure we are already unsealed
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return nil, consts.ErrSealed
+ }
+ if c.standby {
+ return nil, consts.ErrStandby
+ }
+
+ c.generateRootLock.Lock()
+ defer c.generateRootLock.Unlock()
+
+ // Ensure a generateRoot is in progress
+ if c.generateRootConfig == nil {
+ return nil, fmt.Errorf("no root generation in progress")
+ }
+
+ if nonce != c.generateRootConfig.Nonce {
+ return nil, fmt.Errorf("incorrect nonce supplied; nonce for this root generation operation is %s", c.generateRootConfig.Nonce)
+ }
+
+ // Check if we already have this piece
+ for _, existing := range c.generateRootProgress {
+ if bytes.Equal(existing, key) {
+ return nil, fmt.Errorf("given key has already been provided during this generation operation")
+ }
+ }
+
+ // Store this key
+ c.generateRootProgress = append(c.generateRootProgress, key)
+ progress := len(c.generateRootProgress)
+
+ // Check if we don't have enough keys to unlock
+ if len(c.generateRootProgress) < config.SecretThreshold {
+ if c.logger.IsDebug() {
+ c.logger.Debug("core: cannot generate root, not enough keys", "keys", progress, "threshold", config.SecretThreshold)
+ }
+ return &GenerateRootResult{
+ Progress: progress,
+ Required: config.SecretThreshold,
+ PGPFingerprint: c.generateRootConfig.PGPFingerprint,
+ }, nil
+ }
+
+ // Recover the master key
+ var masterKey []byte
+ if config.SecretThreshold == 1 {
+ masterKey = c.generateRootProgress[0]
+ c.generateRootProgress = nil
+ } else {
+ masterKey, err = shamir.Combine(c.generateRootProgress)
+ c.generateRootProgress = nil
+ if err != nil {
+ return nil, fmt.Errorf("failed to compute master key: %v", err)
+ }
+ }
+
+ // Verify the master key
+ if c.seal.RecoveryKeySupported() {
+ if err := c.seal.VerifyRecoveryKey(masterKey); err != nil {
+ c.logger.Error("core: root generation aborted, recovery key verification failed", "error", err)
+ return nil, err
+ }
+ } else {
+ if err := c.barrier.VerifyMaster(masterKey); err != nil {
+ c.logger.Error("core: root generation aborted, master key verification failed", "error", err)
+ return nil, err
+ }
+ }
+
+ te, err := c.tokenStore.rootToken()
+ if err != nil {
+ c.logger.Error("core: root token generation failed", "error", err)
+ return nil, err
+ }
+ if te == nil {
+ c.logger.Error("core: got nil token entry back from root generation")
+ return nil, fmt.Errorf("got nil token entry back from root generation")
+ }
+
+ uuidBytes, err := uuid.ParseUUID(te.ID)
+ if err != nil {
+ c.tokenStore.Revoke(te.ID)
+ c.logger.Error("core: error getting generated token bytes", "error", err)
+ return nil, err
+ }
+ if uuidBytes == nil {
+ c.tokenStore.Revoke(te.ID)
+ c.logger.Error("core: got nil parsed UUID bytes")
+ return nil, fmt.Errorf("got nil parsed UUID bytes")
+ }
+
+ var tokenBytes []byte
+ // Get the encoded value first so that if there is an error we don't create
+ // the root token.
+ switch {
+ case len(c.generateRootConfig.OTP) > 0:
+ // This function performs decoding checks so rather than decode the OTP,
+ // just encode the value we're passing in.
+ tokenBytes, err = xor.XORBase64(c.generateRootConfig.OTP, base64.StdEncoding.EncodeToString(uuidBytes))
+ if err != nil {
+ c.tokenStore.Revoke(te.ID)
+ c.logger.Error("core: xor of root token failed", "error", err)
+ return nil, err
+ }
+
+ case len(c.generateRootConfig.PGPKey) > 0:
+ _, tokenBytesArr, err := pgpkeys.EncryptShares([][]byte{[]byte(te.ID)}, []string{c.generateRootConfig.PGPKey})
+ if err != nil {
+ c.tokenStore.Revoke(te.ID)
+ c.logger.Error("core: error encrypting new root token", "error", err)
+ return nil, err
+ }
+ tokenBytes = tokenBytesArr[0]
+
+ default:
+ c.tokenStore.Revoke(te.ID)
+ return nil, fmt.Errorf("unreachable condition")
+ }
+
+ results := &GenerateRootResult{
+ Progress: progress,
+ Required: config.SecretThreshold,
+ EncodedRootToken: base64.StdEncoding.EncodeToString(tokenBytes),
+ PGPFingerprint: c.generateRootConfig.PGPFingerprint,
+ }
+
+ if c.logger.IsInfo() {
+ c.logger.Info("core: root generation finished", "nonce", c.generateRootConfig.Nonce)
+ }
+
+ c.generateRootProgress = nil
+ c.generateRootConfig = nil
+ return results, nil
+}
+
+// GenerateRootCancel is used to cancel an in-progress root generation
+func (c *Core) GenerateRootCancel() error {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return consts.ErrSealed
+ }
+ if c.standby {
+ return consts.ErrStandby
+ }
+
+ c.generateRootLock.Lock()
+ defer c.generateRootLock.Unlock()
+
+ // Clear any progress or config
+ c.generateRootConfig = nil
+ c.generateRootProgress = nil
+ return nil
+}
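
Taken together, `GenerateRootInit`, `GenerateRootUpdate` and `GenerateRootCancel` form a small state machine guarded by `generateRootLock`. The sketch below drives the OTP flow end to end against an unsealed `*Core`, assuming at least threshold-many unseal (or recovery) key shares; `generateRootViaOTP` is a hypothetical helper, but it only uses functions defined in this file and the helper packages, mirroring what the tests that follow exercise.

```go
package vault

import (
	"encoding/base64"
	"fmt"

	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/vault/helper/xor"
)

// generateRootViaOTP (hypothetical) drives the OTP flow: init, feed key
// shares, then undo the XOR to recover the root token UUID. c must be
// unsealed; shares must hold at least threshold-many key shares.
func generateRootViaOTP(c *Core, shares [][]byte) (string, error) {
	// GenerateRootInit requires the OTP to decode to exactly 16 bytes.
	otpBytes, err := GenerateRandBytes(16)
	if err != nil {
		return "", err
	}
	otp := base64.StdEncoding.EncodeToString(otpBytes)

	if err := c.GenerateRootInit(otp, ""); err != nil {
		return "", err
	}

	// The nonce is generated server-side; read it back from the config.
	conf, err := c.GenerateRootConfiguration()
	if err != nil {
		return "", err
	}
	if conf == nil {
		return "", fmt.Errorf("no root generation config")
	}

	var result *GenerateRootResult
	for _, share := range shares {
		if result, err = c.GenerateRootUpdate(share, conf.Nonce); err != nil {
			return "", err
		}
		if result.EncodedRootToken != "" {
			break // threshold reached
		}
	}
	if result == nil || result.EncodedRootToken == "" {
		return "", fmt.Errorf("not enough key shares provided")
	}

	// The encoded token is the token UUID XORed with the OTP; reverse it.
	tokenBytes, err := xor.XORBase64(result.EncodedRootToken, otp)
	if err != nil {
		return "", err
	}
	return uuid.FormatUUID(tokenBytes)
}
```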
diff --git a/vendor/github.com/hashicorp/vault/vault/generate_root_test.go b/vendor/github.com/hashicorp/vault/vault/generate_root_test.go
new file mode 100644
index 0000000..3def4d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/generate_root_test.go
@@ -0,0 +1,341 @@
+package vault
+
+import (
+ "encoding/base64"
+ "testing"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/pgpkeys"
+ "github.com/hashicorp/vault/helper/xor"
+)
+
+func TestCore_GenerateRoot_Lifecycle(t *testing.T) {
+ bc, rc := TestSealDefConfigs()
+ c, masterKeys, _, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
+ c.seal.(*TestSeal).recoveryKeysDisabled = true
+ testCore_GenerateRoot_Lifecycle_Common(t, c, masterKeys)
+
+ bc, rc = TestSealDefConfigs()
+ c, _, recoveryKeys, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
+ testCore_GenerateRoot_Lifecycle_Common(t, c, recoveryKeys)
+}
+
+func testCore_GenerateRoot_Lifecycle_Common(t *testing.T, c *Core, keys [][]byte) {
+ // Verify update not allowed
+ if _, err := c.GenerateRootUpdate(keys[0], ""); err == nil {
+ t.Fatalf("no root generation in progress")
+ }
+
+ // Should be no progress
+ num, err := c.GenerateRootProgress()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if num != 0 {
+ t.Fatalf("bad: %d", num)
+ }
+
+ // Should be no config
+ conf, err := c.GenerateRootConfiguration()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if conf != nil {
+ t.Fatalf("bad: %v", conf)
+ }
+
+ // Cancel should be idempotent
+ err = c.GenerateRootCancel()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ otpBytes, err := GenerateRandBytes(16)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Start a root generation
+ err = c.GenerateRootInit(base64.StdEncoding.EncodeToString(otpBytes), "")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should get config
+ conf, err = c.GenerateRootConfiguration()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if conf == nil {
+ t.Fatalf("bad: no root generation config received")
+ }
+
+ // Cancel should clear the config
+ err = c.GenerateRootCancel()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be no config
+ conf, err = c.GenerateRootConfiguration()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if conf != nil {
+ t.Fatalf("bad: %v", conf)
+ }
+}
+
+func TestCore_GenerateRoot_Init(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+ testCore_GenerateRoot_Init_Common(t, c)
+
+ bc, rc := TestSealDefConfigs()
+ c, _, _, _ = TestCoreUnsealedWithConfigs(t, bc, rc)
+ testCore_GenerateRoot_Init_Common(t, c)
+}
+
+func testCore_GenerateRoot_Init_Common(t *testing.T, c *Core) {
+ otpBytes, err := GenerateRandBytes(16)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = c.GenerateRootInit(base64.StdEncoding.EncodeToString(otpBytes), "")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Second should fail
+ err = c.GenerateRootInit("", pgpkeys.TestPubKey1)
+ if err == nil {
+ t.Fatalf("should fail")
+ }
+}
+
+func TestCore_GenerateRoot_InvalidMasterNonce(t *testing.T) {
+ bc, rc := TestSealDefConfigs()
+ bc.SecretShares = 1
+ bc.SecretThreshold = 1
+ bc.StoredShares = 0
+ c, masterKeys, _, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
+ c.seal.(*TestSeal).recoveryKeysDisabled = true
+ // Make the master invalid
+ masterKeys[0][0]++
+ testCore_GenerateRoot_InvalidMasterNonce_Common(t, c, masterKeys)
+
+ bc, rc = TestSealDefConfigs()
+ // For ease of use, make the threshold equal to the shares, with no
+ // stored shares, so we only get an error after the full set is provided
+ bc.StoredShares = 0
+ bc.SecretShares = 5
+ bc.SecretThreshold = 5
+ rc.SecretShares = 5
+ rc.SecretThreshold = 5
+ // In this case pass in the master keys; the seal expects recovery keys, so they'll be invalid
+ c, masterKeys, _, _ = TestCoreUnsealedWithConfigs(t, bc, rc)
+ testCore_GenerateRoot_InvalidMasterNonce_Common(t, c, masterKeys)
+}
+
+func testCore_GenerateRoot_InvalidMasterNonce_Common(t *testing.T, c *Core, keys [][]byte) {
+ otpBytes, err := GenerateRandBytes(16)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = c.GenerateRootInit(base64.StdEncoding.EncodeToString(otpBytes), "")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Fetch new config with generated nonce
+ rgconf, err := c.GenerateRootConfiguration()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if rgconf == nil {
+ t.Fatalf("bad: no rekey config received")
+ }
+
+ // Provide the nonce (invalid)
+ _, err = c.GenerateRootUpdate(keys[0], "abcd")
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ // Provide the master (invalid)
+ for _, key := range keys {
+ _, err = c.GenerateRootUpdate(key, rgconf.Nonce)
+ }
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+}
+
+func TestCore_GenerateRoot_Update_OTP(t *testing.T) {
+ bc, rc := TestSealDefConfigs()
+ c, masterKeys, _, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
+ c.seal.(*TestSeal).recoveryKeysDisabled = true
+ testCore_GenerateRoot_Update_OTP_Common(t, c, masterKeys[0:bc.SecretThreshold])
+
+ bc, rc = TestSealDefConfigs()
+ c, _, recoveryKeys, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
+ testCore_GenerateRoot_Update_OTP_Common(t, c, recoveryKeys[0:rc.SecretThreshold])
+}
+
+func testCore_GenerateRoot_Update_OTP_Common(t *testing.T, c *Core, keys [][]byte) {
+ otpBytes, err := GenerateRandBytes(16)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ otp := base64.StdEncoding.EncodeToString(otpBytes)
+ // Start a root generation
+ err = c.GenerateRootInit(otp, "")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Fetch new config with generated nonce
+ rkconf, err := c.GenerateRootConfiguration()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if rkconf == nil {
+ t.Fatalf("bad: no root generation config received")
+ }
+
+ // Provide the keys
+ var result *GenerateRootResult
+ for _, key := range keys {
+ result, err = c.GenerateRootUpdate(key, rkconf.Nonce)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+ if result == nil {
+ t.Fatalf("Bad, result is nil")
+ }
+
+ encodedRootToken := result.EncodedRootToken
+
+ // Should be no progress
+ num, err := c.GenerateRootProgress()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if num != 0 {
+ t.Fatalf("bad: %d", num)
+ }
+
+ // Should be no config
+ conf, err := c.GenerateRootConfiguration()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if conf != nil {
+ t.Fatalf("bad: %v", conf)
+ }
+
+ tokenBytes, err := xor.XORBase64(encodedRootToken, otp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token, err := uuid.FormatUUID(tokenBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Ensure that the token is a root token
+ te, err := c.tokenStore.Lookup(token)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if te == nil {
+ t.Fatalf("token was nil")
+ }
+ if te.ID != token || te.Parent != "" ||
+ len(te.Policies) != 1 || te.Policies[0] != "root" {
+ t.Fatalf("bad: %#v", *te)
+ }
+}
+
+func TestCore_GenerateRoot_Update_PGP(t *testing.T) {
+ bc, rc := TestSealDefConfigs()
+ c, masterKeys, _, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
+ c.seal.(*TestSeal).recoveryKeysDisabled = true
+ testCore_GenerateRoot_Update_PGP_Common(t, c, masterKeys[0:bc.SecretThreshold])
+
+ bc, rc = TestSealDefConfigs()
+ c, _, recoveryKeys, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
+ testCore_GenerateRoot_Update_PGP_Common(t, c, recoveryKeys[0:rc.SecretThreshold])
+}
+
+func testCore_GenerateRoot_Update_PGP_Common(t *testing.T, c *Core, keys [][]byte) {
+ // Start a root generation
+ err := c.GenerateRootInit("", pgpkeys.TestPubKey1)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Fetch new config with generated nonce
+ rkconf, err := c.GenerateRootConfiguration()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if rkconf == nil {
+ t.Fatalf("bad: no root generation config received")
+ }
+
+ // Provide the keys
+ var result *GenerateRootResult
+ for _, key := range keys {
+ result, err = c.GenerateRootUpdate(key, rkconf.Nonce)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+ if result == nil {
+ t.Fatalf("Bad, result is nil")
+ }
+
+ encodedRootToken := result.EncodedRootToken
+
+ // Should be no progress
+ num, err := c.GenerateRootProgress()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if num != 0 {
+ t.Fatalf("bad: %d", num)
+ }
+
+ // Should be no config
+ conf, err := c.GenerateRootConfiguration()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if conf != nil {
+ t.Fatalf("bad: %v", conf)
+ }
+
+ ptBuf, err := pgpkeys.DecryptBytes(encodedRootToken, pgpkeys.TestPrivKey1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ptBuf == nil {
+ t.Fatal("Got nil plaintext key")
+ }
+
+ token := ptBuf.String()
+
+ // Ensure that the token is a root token
+ te, err := c.tokenStore.Lookup(token)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if te == nil {
+ t.Fatalf("token was nil")
+ }
+ if te.ID != token || te.Parent != "" ||
+ len(te.Policies) != 1 || te.Policies[0] != "root" {
+ t.Fatalf("bad: %#v", *te)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/init.go b/vendor/github.com/hashicorp/vault/vault/init.go
new file mode 100644
index 0000000..3e267fd
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/init.go
@@ -0,0 +1,302 @@
+package vault
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/pgpkeys"
+ "github.com/hashicorp/vault/shamir"
+)
+
+// InitParams keeps the init function from being littered with too many
+// params, that's it!
+type InitParams struct {
+ BarrierConfig *SealConfig
+ RecoveryConfig *SealConfig
+ RootTokenPGPKey string
+}
+
+// InitResult is used to provide the key parts back after
+// they are generated as part of the initialization.
+type InitResult struct {
+ SecretShares [][]byte
+ RecoveryShares [][]byte
+ RootToken string
+}
+
+// Initialized checks if the Vault is already initialized
+func (c *Core) Initialized() (bool, error) {
+ // Check the barrier first
+ init, err := c.barrier.Initialized()
+ if err != nil {
+ c.logger.Error("core: barrier init check failed", "error", err)
+ return false, err
+ }
+ if !init {
+ c.logger.Info("core: security barrier not initialized")
+ return false, nil
+ }
+
+ // Verify the seal configuration
+ sealConf, err := c.seal.BarrierConfig()
+ if err != nil {
+ return false, err
+ }
+ if sealConf == nil {
+ return false, fmt.Errorf("core: barrier reports initialized but no seal configuration found")
+ }
+
+ return true, nil
+}
+
+func (c *Core) generateShares(sc *SealConfig) ([]byte, [][]byte, error) {
+ // Generate a master key
+ masterKey, err := c.barrier.GenerateKey()
+ if err != nil {
+ return nil, nil, fmt.Errorf("key generation failed: %v", err)
+ }
+
+ // Return the master key if only a single key part is used
+ var unsealKeys [][]byte
+ if sc.SecretShares == 1 {
+ unsealKeys = append(unsealKeys, masterKey)
+ } else {
+ // Split the master key using the Shamir algorithm
+ shares, err := shamir.Split(masterKey, sc.SecretShares, sc.SecretThreshold)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to generate barrier shares: %v", err)
+ }
+ unsealKeys = shares
+ }
+
+ // If we have PGP keys, perform the encryption
+ if len(sc.PGPKeys) > 0 {
+ hexEncodedShares := make([][]byte, len(unsealKeys))
+ for i := range unsealKeys {
+ hexEncodedShares[i] = []byte(hex.EncodeToString(unsealKeys[i]))
+ }
+ _, encryptedShares, err := pgpkeys.EncryptShares(hexEncodedShares, sc.PGPKeys)
+ if err != nil {
+ return nil, nil, err
+ }
+ unsealKeys = encryptedShares
+ }
+
+ return masterKey, unsealKeys, nil
+}
+
+// Initialize is used to initialize the Vault with the given
+// configurations.
+func (c *Core) Initialize(initParams *InitParams) (*InitResult, error) {
+ barrierConfig := initParams.BarrierConfig
+ recoveryConfig := initParams.RecoveryConfig
+
+ if c.seal.RecoveryKeySupported() {
+ if recoveryConfig == nil {
+ return nil, fmt.Errorf("recovery configuration must be supplied")
+ }
+
+ if recoveryConfig.SecretShares < 1 {
+ return nil, fmt.Errorf("recovery configuration must specify a positive number of shares")
+ }
+
+ // Check if the seal configuration is valid
+ if err := recoveryConfig.Validate(); err != nil {
+ c.logger.Error("core: invalid recovery configuration", "error", err)
+ return nil, fmt.Errorf("invalid recovery configuration: %v", err)
+ }
+ }
+
+ // Check if the seal configuration is valid
+ if err := barrierConfig.Validate(); err != nil {
+ c.logger.Error("core: invalid seal configuration", "error", err)
+ return nil, fmt.Errorf("invalid seal configuration: %v", err)
+ }
+
+ // Avoid an initialization race
+ c.stateLock.Lock()
+ defer c.stateLock.Unlock()
+
+ // Check if we are initialized
+ init, err := c.Initialized()
+ if err != nil {
+ return nil, err
+ }
+ if init {
+ return nil, ErrAlreadyInit
+ }
+
+ err = c.seal.Init()
+ if err != nil {
+ c.logger.Error("core: failed to initialize seal", "error", err)
+ return nil, fmt.Errorf("error initializing seal: %v", err)
+ }
+
+ barrierKey, barrierUnsealKeys, err := c.generateShares(barrierConfig)
+ if err != nil {
+ c.logger.Error("core: error generating shares", "error", err)
+ return nil, err
+ }
+
+ // Initialize the barrier
+ if err := c.barrier.Initialize(barrierKey); err != nil {
+ c.logger.Error("core: failed to initialize barrier", "error", err)
+ return nil, fmt.Errorf("failed to initialize barrier: %v", err)
+ }
+ if c.logger.IsInfo() {
+ c.logger.Info("core: security barrier initialized", "shares", barrierConfig.SecretShares, "threshold", barrierConfig.SecretThreshold)
+ }
+
+ // Unseal the barrier
+ if err := c.barrier.Unseal(barrierKey); err != nil {
+ c.logger.Error("core: failed to unseal barrier", "error", err)
+ return nil, fmt.Errorf("failed to unseal barrier: %v", err)
+ }
+
+ // Ensure the barrier is re-sealed
+ defer func() {
+ // Defers are LIFO so we need to run this here too to ensure the stop
+ // happens before sealing. preSeal also stops, so we just make the
+ // stopping safe against multiple calls.
+ if err := c.barrier.Seal(); err != nil {
+ c.logger.Error("core: failed to seal barrier", "error", err)
+ }
+ }()
+
+ err = c.seal.SetBarrierConfig(barrierConfig)
+ if err != nil {
+ c.logger.Error("core: failed to save barrier configuration", "error", err)
+ return nil, fmt.Errorf("barrier configuration saving failed: %v", err)
+ }
+
+ // If we are storing shares, pop them out of the returned results and push
+ // them through the seal
+ if barrierConfig.StoredShares > 0 {
+ var keysToStore [][]byte
+ for i := 0; i < barrierConfig.StoredShares; i++ {
+ keysToStore = append(keysToStore, barrierUnsealKeys[0])
+ barrierUnsealKeys = barrierUnsealKeys[1:]
+ }
+ if err := c.seal.SetStoredKeys(keysToStore); err != nil {
+ c.logger.Error("core: failed to store keys", "error", err)
+ return nil, fmt.Errorf("failed to store keys: %v", err)
+ }
+ }
+
+ results := &InitResult{
+ SecretShares: barrierUnsealKeys,
+ }
+
+ // Perform initial setup
+ if err := c.setupCluster(); err != nil {
+ c.logger.Error("core: cluster setup failed during init", "error", err)
+ return nil, err
+ }
+ if err := c.postUnseal(); err != nil {
+ c.logger.Error("core: post-unseal setup failed during init", "error", err)
+ return nil, err
+ }
+
+ // Save the configuration regardless, but only generate a key if it's not
+ // disabled. When using recovery keys they are stored in the barrier, so
+ // this must happen post-unseal.
+ if c.seal.RecoveryKeySupported() {
+ err = c.seal.SetRecoveryConfig(recoveryConfig)
+ if err != nil {
+ c.logger.Error("core: failed to save recovery configuration", "error", err)
+ return nil, fmt.Errorf("recovery configuration saving failed: %v", err)
+ }
+
+ if recoveryConfig.SecretShares > 0 {
+ recoveryKey, recoveryUnsealKeys, err := c.generateShares(recoveryConfig)
+ if err != nil {
+ c.logger.Error("core: failed to generate recovery shares", "error", err)
+ return nil, err
+ }
+
+ err = c.seal.SetRecoveryKey(recoveryKey)
+ if err != nil {
+ return nil, err
+ }
+
+ results.RecoveryShares = recoveryUnsealKeys
+ }
+ }
+
+ // Generate a new root token
+ rootToken, err := c.tokenStore.rootToken()
+ if err != nil {
+ c.logger.Error("core: root token generation failed", "error", err)
+ return nil, err
+ }
+ results.RootToken = rootToken.ID
+ c.logger.Info("core: root token generated")
+
+ if initParams.RootTokenPGPKey != "" {
+ _, encryptedVals, err := pgpkeys.EncryptShares([][]byte{[]byte(results.RootToken)}, []string{initParams.RootTokenPGPKey})
+ if err != nil {
+ c.logger.Error("core: root token encryption failed", "error", err)
+ return nil, err
+ }
+ results.RootToken = base64.StdEncoding.EncodeToString(encryptedVals[0])
+ }
+
+ // Prepare to re-seal
+ if err := c.preSeal(); err != nil {
+ c.logger.Error("core: pre-seal teardown failed", "error", err)
+ return nil, err
+ }
+
+ return results, nil
+}
+
+// UnsealWithStoredKeys attempts to unseal using any unseal keys the seal
+// has stored; it is a no-op if stored keys are unsupported or Vault is
+// already unsealed.
+func (c *Core) UnsealWithStoredKeys() error {
+ if !c.seal.StoredKeysSupported() {
+ return nil
+ }
+
+ sealed, err := c.Sealed()
+ if err != nil {
+ c.logger.Error("core: error checking sealed status in auto-unseal", "error", err)
+ return fmt.Errorf("error checking sealed status in auto-unseal: %s", err)
+ }
+ if !sealed {
+ return nil
+ }
+
+ c.logger.Info("core: stored unseal keys supported, attempting fetch")
+ keys, err := c.seal.GetStoredKeys()
+ if err != nil {
+ c.logger.Error("core: fetching stored unseal keys failed", "error", err)
+ return &NonFatalError{Err: fmt.Errorf("fetching stored unseal keys failed: %v", err)}
+ }
+ if len(keys) == 0 {
+ c.logger.Warn("core: stored unseal key(s) supported but none found")
+ } else {
+ unsealed := false
+ keysUsed := 0
+ for _, key := range keys {
+ unsealed, err = c.Unseal(key)
+ if err != nil {
+ c.logger.Error("core: unseal with stored unseal key failed", "error", err)
+ return &NonFatalError{Err: fmt.Errorf("unseal with stored key failed: %v", err)}
+ }
+ keysUsed++
+ if unsealed {
+ break
+ }
+ }
+ if !unsealed {
+ if c.logger.IsWarn() {
+ c.logger.Warn("core: stored unseal key(s) used but Vault not unsealed yet", "stored_keys_used", keysUsed)
+ }
+ } else {
+ if c.logger.IsInfo() {
+ c.logger.Info("core: successfully unsealed with stored key(s)", "stored_keys_used", keysUsed)
+ }
+ }
+ }
+
+ return nil
+}
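
`Initialize` ties share generation, barrier setup and root-token creation together. Below is a condensed sketch of first-time initialization against an in-memory backend, using the same helpers the test file that follows uses (`physical.NewInmem`, `logformat.NewVaultLogger`); `initFreshCore` itself is a hypothetical wrapper, not part of the package.

```go
package vault

import (
	log "github.com/mgutz/logxi/v1"

	"github.com/hashicorp/vault/helper/logformat"
	"github.com/hashicorp/vault/physical"
)

// initFreshCore (hypothetical) performs first-time initialization with
// 5 unseal shares and a threshold of 3. The default shamir seal does
// not support recovery keys, so no RecoveryConfig is supplied.
func initFreshCore() (*Core, *InitResult, error) {
	logger := logformat.NewVaultLogger(log.LevelTrace)

	c, err := NewCore(&CoreConfig{
		Physical:     physical.NewInmem(logger),
		DisableMlock: true,
	})
	if err != nil {
		return nil, nil, err
	}

	res, err := c.Initialize(&InitParams{
		BarrierConfig: &SealConfig{SecretShares: 5, SecretThreshold: 3},
	})
	if err != nil {
		return nil, nil, err
	}

	// res.SecretShares holds the unseal key shares and res.RootToken the
	// initial root token; the deferred Seal() has re-sealed the barrier.
	return c, res, nil
}
```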
diff --git a/vendor/github.com/hashicorp/vault/vault/init_test.go b/vendor/github.com/hashicorp/vault/vault/init_test.go
new file mode 100644
index 0000000..38d95e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/init_test.go
@@ -0,0 +1,168 @@
+package vault
+
+import (
+ "reflect"
+ "testing"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/physical"
+)
+
+func TestCore_Init(t *testing.T) {
+ c, conf := testCore_NewTestCore(t, nil)
+ testCore_Init_Common(t, c, conf, &SealConfig{SecretShares: 5, SecretThreshold: 3}, nil)
+
+ c, conf = testCore_NewTestCore(t, newTestSeal(t))
+ bc, rc := TestSealDefConfigs()
+ rc.SecretShares = 4
+ rc.SecretThreshold = 2
+ testCore_Init_Common(t, c, conf, bc, rc)
+}
+
+func testCore_NewTestCore(t *testing.T, seal Seal) (*Core, *CoreConfig) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ inm := physical.NewInmem(logger)
+ conf := &CoreConfig{
+ Physical: inm,
+ DisableMlock: true,
+ LogicalBackends: map[string]logical.Factory{
+ "generic": LeasedPassthroughBackendFactory,
+ },
+ Seal: seal,
+ }
+ c, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ return c, conf
+}
+
+func testCore_Init_Common(t *testing.T, c *Core, conf *CoreConfig, barrierConf, recoveryConf *SealConfig) {
+ init, err := c.Initialized()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if init {
+ t.Fatalf("should not be init")
+ }
+
+ // Check the seal configuration
+ outConf, err := c.seal.BarrierConfig()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if outConf != nil {
+ t.Fatalf("bad: %v", outConf)
+ }
+ if recoveryConf != nil {
+ outConf, err := c.seal.RecoveryConfig()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if outConf != nil {
+ t.Fatalf("bad: %v", outConf)
+ }
+ }
+
+ res, err := c.Initialize(&InitParams{
+ BarrierConfig: barrierConf,
+ RecoveryConfig: recoveryConf,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(res.SecretShares) != (barrierConf.SecretShares - barrierConf.StoredShares) {
+ t.Fatalf("Bad: got\n%#v\nexpected conf matching\n%#v\n", *res, *barrierConf)
+ }
+ if recoveryConf != nil {
+ if len(res.RecoveryShares) != recoveryConf.SecretShares {
+ t.Fatalf("Bad: got\n%#v\nexpected conf matching\n%#v\n", *res, *recoveryConf)
+ }
+ }
+
+ if res.RootToken == "" {
+ t.Fatalf("Bad: %#v", res)
+ }
+
+ _, err = c.Initialize(&InitParams{
+ BarrierConfig: barrierConf,
+ RecoveryConfig: recoveryConf,
+ })
+ if err != ErrAlreadyInit {
+ t.Fatalf("err: %v", err)
+ }
+
+ init, err = c.Initialized()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if !init {
+ t.Fatalf("should be init")
+ }
+
+ // Check the seal configuration
+ outConf, err = c.seal.BarrierConfig()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(outConf, barrierConf) {
+ t.Fatalf("bad: %v expect: %v", outConf, barrierConf)
+ }
+ if recoveryConf != nil {
+ outConf, err = c.seal.RecoveryConfig()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(outConf, recoveryConf) {
+ t.Fatalf("bad: %v expect: %v", outConf, recoveryConf)
+ }
+ }
+
+ // New Core, same backend
+ c2, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ _, err = c2.Initialize(&InitParams{
+ BarrierConfig: barrierConf,
+ RecoveryConfig: recoveryConf,
+ })
+ if err != ErrAlreadyInit {
+ t.Fatalf("err: %v", err)
+ }
+
+ init, err = c2.Initialized()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if !init {
+ t.Fatalf("should be init")
+ }
+
+ // Check the seal configuration
+ outConf, err = c2.seal.BarrierConfig()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(outConf, barrierConf) {
+ t.Fatalf("bad: %v expect: %v", outConf, barrierConf)
+ }
+ if recoveryConf != nil {
+ outConf, err = c2.seal.RecoveryConfig()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(outConf, recoveryConf) {
+ t.Fatalf("bad: %v expect: %v", outConf, recoveryConf)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/keyring.go b/vendor/github.com/hashicorp/vault/vault/keyring.go
new file mode 100644
index 0000000..2cd4871
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/keyring.go
@@ -0,0 +1,202 @@
+package vault
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+)
+
+// Keyring is used to manage multiple encryption keys used by
+// the barrier. New keys can be installed and each has a sequential term.
+// The term used to encrypt a key is prefixed to the key written out.
+// All data is encrypted with the latest key, but storing the old keys
+// allows for decryption of keys written previously. Along with the encryption
+// keys, the keyring also tracks the master key. This is necessary so that
+// when a new key is added to the keyring, we can encrypt with the master key
+// and write out the new keyring.
+type Keyring struct {
+ masterKey []byte
+ keys map[uint32]*Key
+ activeTerm uint32
+}
+
+// EncodedKeyring is used for serialization of the keyring
+type EncodedKeyring struct {
+ MasterKey []byte
+ Keys []*Key
+}
+
+// Key represents a single term, along with the key used.
+type Key struct {
+ Term uint32
+ Version int
+ Value []byte
+ InstallTime time.Time
+}
+
+// Serialize is used to create a byte encoded key
+func (k *Key) Serialize() ([]byte, error) {
+ return json.Marshal(k)
+}
+
+// DeserializeKey is used to deserialize and return a new key
+func DeserializeKey(buf []byte) (*Key, error) {
+ k := new(Key)
+ if err := jsonutil.DecodeJSON(buf, k); err != nil {
+ return nil, fmt.Errorf("deserialization failed: %v", err)
+ }
+ return k, nil
+}
+
+// NewKeyring creates a new keyring
+func NewKeyring() *Keyring {
+ k := &Keyring{
+ keys: make(map[uint32]*Key),
+ activeTerm: 0,
+ }
+ return k
+}
+
+// Clone returns a new copy of the keyring
+func (k *Keyring) Clone() *Keyring {
+ clone := &Keyring{
+ masterKey: k.masterKey,
+ keys: make(map[uint32]*Key, len(k.keys)),
+ activeTerm: k.activeTerm,
+ }
+ for idx, key := range k.keys {
+ clone.keys[idx] = key
+ }
+ return clone
+}
+
+// AddKey adds a new key to the keyring
+func (k *Keyring) AddKey(key *Key) (*Keyring, error) {
+ // Ensure there is no conflict
+ if exist, ok := k.keys[key.Term]; ok {
+ if !bytes.Equal(key.Value, exist.Value) {
+ return nil, fmt.Errorf("Conflicting key for term %d already installed", key.Term)
+ }
+ return k, nil
+ }
+
+ // Add a time if none
+ if key.InstallTime.IsZero() {
+ key.InstallTime = time.Now()
+ }
+
+ // Make a new keyring
+ clone := k.Clone()
+
+ // Install the new key
+ clone.keys[key.Term] = key
+
+ // Update the active term if newer
+ if key.Term > clone.activeTerm {
+ clone.activeTerm = key.Term
+ }
+ return clone, nil
+}
+
+// RemoveKey removes a key from the keyring
+func (k *Keyring) RemoveKey(term uint32) (*Keyring, error) {
+ // Ensure this is not the active key
+ if term == k.activeTerm {
+ return nil, fmt.Errorf("Cannot remove active key")
+ }
+
+ // Check if this term does not exist
+ if _, ok := k.keys[term]; !ok {
+ return k, nil
+ }
+
+ // Delete the key
+ clone := k.Clone()
+ delete(clone.keys, term)
+ return clone, nil
+}
+
+// ActiveTerm returns the currently active term
+func (k *Keyring) ActiveTerm() uint32 {
+ return k.activeTerm
+}
+
+// ActiveKey returns the active encryption key, or nil
+func (k *Keyring) ActiveKey() *Key {
+ return k.keys[k.activeTerm]
+}
+
+// TermKey returns the key for the given term, or nil
+func (k *Keyring) TermKey(term uint32) *Key {
+ return k.keys[term]
+}
+
+// SetMasterKey is used to update the master key
+func (k *Keyring) SetMasterKey(val []byte) *Keyring {
+ valCopy := make([]byte, len(val))
+ copy(valCopy, val)
+ clone := k.Clone()
+ clone.masterKey = valCopy
+ return clone
+}
+
+// MasterKey returns the master key
+func (k *Keyring) MasterKey() []byte {
+ return k.masterKey
+}
+
+// Serialize is used to create a byte encoded keyring
+func (k *Keyring) Serialize() ([]byte, error) {
+ // Create the encoded entry
+ enc := EncodedKeyring{
+ MasterKey: k.masterKey,
+ }
+ for _, key := range k.keys {
+ enc.Keys = append(enc.Keys, key)
+ }
+
+ // JSON encode the keyring
+ buf, err := json.Marshal(enc)
+ return buf, err
+}
+
+// DeserializeKeyring is used to deserialize and return a new keyring
+func DeserializeKeyring(buf []byte) (*Keyring, error) {
+ // Deserialize the keyring
+ var enc EncodedKeyring
+ if err := jsonutil.DecodeJSON(buf, &enc); err != nil {
+ return nil, fmt.Errorf("deserialization failed: %v", err)
+ }
+
+ // Create a new keyring
+ k := NewKeyring()
+ k.masterKey = enc.MasterKey
+ for _, key := range enc.Keys {
+ k.keys[key.Term] = key
+ if key.Term > k.activeTerm {
+ k.activeTerm = key.Term
+ }
+ }
+ return k, nil
+}
+
+// Zeroize overwrites the master key (and, if keysToo is set, every key
+// value) with zeros. N.B.: since Go 1.5 these overwrites are not reliable;
+// see the documentation around the memzero function. These are best-effort.
+func (k *Keyring) Zeroize(keysToo bool) {
+ if k == nil {
+ return
+ }
+ if k.masterKey != nil {
+ memzero(k.masterKey)
+ }
+ if !keysToo || k.keys == nil {
+ return
+ }
+ for _, key := range k.keys {
+ memzero(key.Value)
+ }
+}
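
Note the copy-on-write style: `AddKey`, `RemoveKey` and `SetMasterKey` never mutate the receiver but return a fresh `*Keyring` built via `Clone`, so callers must keep the returned value. A short sketch of a term rotation plus serialize round-trip, using only the functions above; `rotateAndPersist` is a hypothetical helper.

```go
package vault

import "time"

// rotateAndPersist (hypothetical) installs a new key one term above the
// active one and serializes the result. Because every mutator returns a
// fresh *Keyring, the caller must adopt the returned keyring.
func rotateAndPersist(k *Keyring, newKey []byte) (*Keyring, []byte, error) {
	rotated, err := k.AddKey(&Key{
		Term:        k.ActiveTerm() + 1,
		Version:     1,
		Value:       newKey,
		InstallTime: time.Now(),
	})
	if err != nil {
		return nil, nil, err
	}

	// The serialized form would be encrypted with the master key before
	// hitting storage; DeserializeKeyring(buf) restores the same keyring.
	buf, err := rotated.Serialize()
	if err != nil {
		return nil, nil, err
	}
	return rotated, buf, nil
}
```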
diff --git a/vendor/github.com/hashicorp/vault/vault/keyring_test.go b/vendor/github.com/hashicorp/vault/vault/keyring_test.go
new file mode 100644
index 0000000..60e3925
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/keyring_test.go
@@ -0,0 +1,208 @@
+package vault
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestKeyring(t *testing.T) {
+ k := NewKeyring()
+
+ // Term should be 0
+ if term := k.ActiveTerm(); term != 0 {
+ t.Fatalf("bad: %d", term)
+ }
+
+ // Should have no key
+ if key := k.ActiveKey(); key != nil {
+ t.Fatalf("bad: %v", key)
+ }
+
+ // Add a key
+ testKey := []byte("testing")
+ key1 := &Key{Term: 1, Version: 1, Value: testKey, InstallTime: time.Now()}
+ k, err := k.AddKey(key1)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Term should be 1
+ if term := k.ActiveTerm(); term != 1 {
+ t.Fatalf("bad: %d", term)
+ }
+
+ // Should have key
+ key := k.ActiveKey()
+ if key == nil {
+ t.Fatalf("bad: %v", key)
+ }
+ if !bytes.Equal(key.Value, testKey) {
+ t.Fatalf("bad: %v", key)
+ }
+ if tKey := k.TermKey(1); tKey != key {
+ t.Fatalf("bad: %v", tKey)
+ }
+
+ // Should handle idempotent set
+ k, err = k.AddKey(key1)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should not allow conflicting set
+ testConflict := []byte("nope")
+ key1Conf := &Key{Term: 1, Version: 1, Value: testConflict, InstallTime: time.Now()}
+ _, err = k.AddKey(key1Conf)
+ if err == nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Add a new key
+ testSecond := []byte("second")
+ key2 := &Key{Term: 2, Version: 1, Value: testSecond, InstallTime: time.Now()}
+ k, err = k.AddKey(key2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Term should be 2
+ if term := k.ActiveTerm(); term != 2 {
+ t.Fatalf("bad: %d", term)
+ }
+
+ // Should have key
+ newKey := k.ActiveKey()
+ if newKey == nil {
+ t.Fatalf("bad: %v", key)
+ }
+ if !bytes.Equal(newKey.Value, testSecond) {
+ t.Fatalf("bad: %v", key)
+ }
+ if tKey := k.TermKey(2); tKey != newKey {
+ t.Fatalf("bad: %v", tKey)
+ }
+
+ // Read of old key should work
+ if tKey := k.TermKey(1); tKey != key {
+ t.Fatalf("bad: %v", tKey)
+ }
+
+ // Remove the old key
+ k, err = k.RemoveKey(1)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Read of old key should not work
+ if tKey := k.TermKey(1); tKey != nil {
+ t.Fatalf("bad: %v", tKey)
+ }
+
+ // Removing the active key should fail
+ _, err = k.RemoveKey(2)
+ if err == nil {
+ t.Fatalf("expected error removing active key")
+ }
+}
+
+func TestKeyring_MasterKey(t *testing.T) {
+ k := NewKeyring()
+ master := []byte("test")
+ master2 := []byte("test2")
+
+ // Check no master
+ out := k.MasterKey()
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Set master
+ k = k.SetMasterKey(master)
+ out = k.MasterKey()
+ if !bytes.Equal(out, master) {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Update master
+ k = k.SetMasterKey(master2)
+ out = k.MasterKey()
+ if !bytes.Equal(out, master2) {
+ t.Fatalf("bad: %v", out)
+ }
+}
+
+func TestKeyring_Serialize(t *testing.T) {
+ k := NewKeyring()
+ master := []byte("test")
+ k = k.SetMasterKey(master)
+
+ now := time.Now()
+ testKey := []byte("testing")
+ testSecond := []byte("second")
+ k, _ = k.AddKey(&Key{Term: 1, Version: 1, Value: testKey, InstallTime: now})
+ k, _ = k.AddKey(&Key{Term: 2, Version: 1, Value: testSecond, InstallTime: now})
+
+ buf, err := k.Serialize()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ k2, err := DeserializeKeyring(buf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ out := k2.MasterKey()
+ if !bytes.Equal(out, master) {
+ t.Fatalf("bad: %v", out)
+ }
+
+ if k2.ActiveTerm() != k.ActiveTerm() {
+ t.Fatalf("Term mismatch")
+ }
+
+ var i uint32
+ for i = 1; i < k.ActiveTerm(); i++ {
+ key1 := k2.TermKey(i)
+ key2 := k.TermKey(i)
+ // Work around timezone bug due to DeepEqual using == for comparison
+ if !key1.InstallTime.Equal(key2.InstallTime) {
+ t.Fatalf("bad: key 1:\n%#v\nkey 2:\n%#v", key1, key2)
+ }
+ key1.InstallTime = key2.InstallTime
+ if !reflect.DeepEqual(key1, key2) {
+ t.Fatalf("bad: key 1:\n%#v\nkey 2:\n%#v", key1, key2)
+ }
+ }
+}
+
+func TestKey_Serialize(t *testing.T) {
+ k := &Key{
+ Term: 10,
+ Version: 1,
+ Value: []byte("foobarbaz"),
+ InstallTime: time.Now(),
+ }
+
+ buf, err := k.Serialize()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ out, err := DeserializeKey(buf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Work around timezone bug due to DeepEqual using == for comparison
+ if !k.InstallTime.Equal(out.InstallTime) {
+ t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", k, out)
+ }
+ k.InstallTime = out.InstallTime
+
+ if !reflect.DeepEqual(k, out) {
+ t.Fatalf("bad: %#v", out)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go
new file mode 100644
index 0000000..76353b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go
@@ -0,0 +1,207 @@
+package vault
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// CubbyholeBackendFactory constructs a new cubbyhole backend
+func CubbyholeBackendFactory(conf *logical.BackendConfig) (logical.Backend, error) {
+ var b CubbyholeBackend
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(cubbyholeHelp),
+
+ Paths: []*framework.Path{
+ &framework.Path{
+ Pattern: ".*",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleRead,
+ logical.CreateOperation: b.handleWrite,
+ logical.UpdateOperation: b.handleWrite,
+ logical.DeleteOperation: b.handleDelete,
+ logical.ListOperation: b.handleList,
+ },
+
+ ExistenceCheck: b.handleExistenceCheck,
+
+ HelpSynopsis: strings.TrimSpace(cubbyholeHelpSynopsis),
+ HelpDescription: strings.TrimSpace(cubbyholeHelpDescription),
+ },
+ },
+ }
+
+ if conf == nil {
+ return nil, fmt.Errorf("Configuation passed into backend is nil")
+ }
+ b.Backend.Setup(conf)
+
+ return &b, nil
+}
+
+// CubbyholeBackend is used for storing secrets directly into the physical
+// backend. The secrets are encrypted in the durable storage.
+// This differs from generic in that every token has its own private
+// storage view. The view is removed when the token expires.
+type CubbyholeBackend struct {
+ *framework.Backend
+
+ saltUUID string
+ storageView logical.Storage
+}
+
+func (b *CubbyholeBackend) revoke(saltedToken string) error {
+ if saltedToken == "" {
+ return fmt.Errorf("cubbyhole: client token empty during revocation")
+ }
+
+ if err := logical.ClearView(b.storageView.(*BarrierView).SubView(saltedToken + "/")); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (b *CubbyholeBackend) handleExistenceCheck(
+ req *logical.Request, data *framework.FieldData) (bool, error) {
+ out, err := req.Storage.Get(req.ClientToken + "/" + req.Path)
+ if err != nil {
+ return false, fmt.Errorf("existence check failed: %v", err)
+ }
+
+ return out != nil, nil
+}
+
+func (b *CubbyholeBackend) handleRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ if req.ClientToken == "" {
+ return nil, fmt.Errorf("cubbyhole read: client token empty")
+ }
+
+ // Read the path
+ out, err := req.Storage.Get(req.ClientToken + "/" + req.Path)
+ if err != nil {
+ return nil, fmt.Errorf("read failed: %v", err)
+ }
+
+ // Fast-path the no data case
+ if out == nil {
+ return nil, nil
+ }
+
+ // Decode the data
+ var rawData map[string]interface{}
+ if err := jsonutil.DecodeJSON(out.Value, &rawData); err != nil {
+ return nil, fmt.Errorf("json decoding failed: %v", err)
+ }
+
+ // Generate the response
+ resp := &logical.Response{
+ Data: rawData,
+ }
+
+ return resp, nil
+}
+
+func (b *CubbyholeBackend) handleWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ if req.ClientToken == "" {
+ return nil, fmt.Errorf("cubbyhole write: client token empty")
+ }
+ // Check that some fields are given
+ if len(req.Data) == 0 {
+ return nil, fmt.Errorf("missing data fields")
+ }
+
+ // JSON encode the data
+ buf, err := json.Marshal(req.Data)
+ if err != nil {
+ return nil, fmt.Errorf("json encoding failed: %v", err)
+ }
+
+ // Write out a new key
+ entry := &logical.StorageEntry{
+ Key: req.ClientToken + "/" + req.Path,
+ Value: buf,
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, fmt.Errorf("failed to write: %v", err)
+ }
+
+ return nil, nil
+}
+
+func (b *CubbyholeBackend) handleDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ if req.ClientToken == "" {
+ return nil, fmt.Errorf("cubbyhole delete: client token empty")
+ }
+ // Delete the key at the request path
+ if err := req.Storage.Delete(req.ClientToken + "/" + req.Path); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *CubbyholeBackend) handleList(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ if req.ClientToken == "" {
+ return nil, fmt.Errorf("cubbyhole list: client token empty")
+ }
+
+ // Right now we only handle directories, so ensure it ends with "/". We
+ // also skip the empty path so we don't end up doing a listing on "//"
+ path := req.Path
+ if path != "" && !strings.HasSuffix(path, "/") {
+ path = path + "/"
+ }
+
+ // List the keys at the prefix given by the request
+ keys, err := req.Storage.List(req.ClientToken + "/" + path)
+ if err != nil {
+ return nil, err
+ }
+
+ // Strip the token
+ strippedKeys := make([]string, len(keys))
+ for i, key := range keys {
+ strippedKeys[i] = strings.TrimPrefix(key, req.ClientToken+"/")
+ }
+
+ // Generate the response
+ return logical.ListResponse(strippedKeys), nil
+}
+
+const cubbyholeHelp = `
+The cubbyhole backend reads and writes arbitrary secrets to the backend.
+The secrets are encrypted/decrypted by Vault: they are never stored
+unencrypted in the backend and the backend never has an opportunity to
+see the unencrypted value.
+
+This backend differs from the 'generic' backend in that it is namespaced
+per-token. Tokens can only read and write their own values, with no
+sharing possible (per-token cubbyholes). This can be useful for implementing
+certain authentication workflows, as well as "scratch" areas for individual
+clients. When the token is revoked, the entire set of stored values for that
+token is also removed.
+`
+
+const cubbyholeHelpSynopsis = `
+Pass-through secret storage to a token-specific cubbyhole in the storage
+backend, allowing you to read/write arbitrary data into secret storage.
+`
+
+const cubbyholeHelpDescription = `
+The cubbyhole backend reads and writes arbitrary data into secret storage,
+encrypting it along the way.
+
+The view into the cubbyhole storage space is different for each token; it is
+a per-token cubbyhole. When the token is revoked all values are removed.
+`
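
The per-token isolation described in the help text comes entirely from the storage key: everything is stored under `req.ClientToken + "/" + req.Path`, so another token simply reads a different key. A sketch of a write/read round trip under one token; `cubbyholeRoundTrip` is hypothetical, and `logical.InmemStorage` is assumed to be available as an in-memory `logical.Storage`.

```go
package vault

import (
	"time"

	"github.com/hashicorp/vault/logical"
)

// cubbyholeRoundTrip (hypothetical) writes and reads back one entry
// under a single client token. A request carrying a different token
// would see nothing at the same path.
func cubbyholeRoundTrip(clientToken string) (map[string]interface{}, error) {
	b, err := CubbyholeBackendFactory(&logical.BackendConfig{
		System: logical.StaticSystemView{
			DefaultLeaseTTLVal: 24 * time.Hour,
			MaxLeaseTTLVal:     32 * 24 * time.Hour,
		},
	})
	if err != nil {
		return nil, err
	}

	storage := &logical.InmemStorage{} // assumed in-memory logical.Storage
	if _, err := b.HandleRequest(&logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "foo",
		Data:        map[string]interface{}{"raw": "test"},
		Storage:     storage,
		ClientToken: clientToken,
	}); err != nil {
		return nil, err
	}

	resp, err := b.HandleRequest(&logical.Request{
		Operation:   logical.ReadOperation,
		Path:        "foo",
		Storage:     storage,
		ClientToken: clientToken,
	})
	if err != nil || resp == nil {
		return nil, err
	}
	return resp.Data, nil // {"raw": "test"}
}
```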
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole_test.go b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole_test.go
new file mode 100644
index 0000000..8800115
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole_test.go
@@ -0,0 +1,266 @@
+package vault
+
+import (
+ "reflect"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestCubbyholeBackend_Write(t *testing.T) {
+ b := testCubbyholeBackend()
+ req := logical.TestRequest(t, logical.UpdateOperation, "foo")
+ clientToken, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.ClientToken = clientToken
+ storage := req.Storage
+ req.Data["raw"] = "test"
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "foo")
+ req.Storage = storage
+ req.ClientToken = clientToken
+ _, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestCubbyholeBackend_Read(t *testing.T) {
+ b := testCubbyholeBackend()
+ req := logical.TestRequest(t, logical.UpdateOperation, "foo")
+ req.Data["raw"] = "test"
+ storage := req.Storage
+ clientToken, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.ClientToken = clientToken
+
+ if _, err := b.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "foo")
+ req.Storage = storage
+ req.ClientToken = clientToken
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ expected := &logical.Response{
+ Data: map[string]interface{}{
+ "raw": "test",
+ },
+ }
+
+ if !reflect.DeepEqual(resp, expected) {
+ t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expected, resp)
+ }
+}
+
+func TestCubbyholeBackend_Delete(t *testing.T) {
+ b := testCubbyholeBackend()
+ req := logical.TestRequest(t, logical.UpdateOperation, "foo")
+ req.Data["raw"] = "test"
+ storage := req.Storage
+ clientToken, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.ClientToken = clientToken
+
+ if _, err := b.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = logical.TestRequest(t, logical.DeleteOperation, "foo")
+ req.Storage = storage
+ req.ClientToken = clientToken
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "foo")
+ req.Storage = storage
+ req.ClientToken = clientToken
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestCubbyholeBackend_List(t *testing.T) {
+ b := testCubbyholeBackend()
+ req := logical.TestRequest(t, logical.UpdateOperation, "foo")
+ clientToken, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.Data["raw"] = "test"
+ req.ClientToken = clientToken
+ storage := req.Storage
+
+ if _, err := b.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = logical.TestRequest(t, logical.UpdateOperation, "bar")
+ req.Data["raw"] = "baz"
+ req.ClientToken = clientToken
+ req.Storage = storage
+
+ if _, err := b.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = logical.TestRequest(t, logical.ListOperation, "")
+ req.Storage = storage
+ req.ClientToken = clientToken
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ expKeys := []string{"foo", "bar"}
+ respKeys := resp.Data["keys"].([]string)
+ sort.Strings(expKeys)
+ sort.Strings(respKeys)
+ if !reflect.DeepEqual(respKeys, expKeys) {
+ t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expKeys, respKeys)
+ }
+}
+
+func TestCubbyholeIsolation(t *testing.T) {
+ b := testCubbyholeBackend()
+
+ clientTokenA, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ clientTokenB, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var storageA logical.Storage
+ var storageB logical.Storage
+
+ // Populate and test A entries
+ req := logical.TestRequest(t, logical.UpdateOperation, "foo")
+ req.ClientToken = clientTokenA
+ storageA = req.Storage
+ req.Data["raw"] = "test"
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "foo")
+ req.Storage = storageA
+ req.ClientToken = clientTokenA
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ expected := &logical.Response{
+ Data: map[string]interface{}{
+ "raw": "test",
+ },
+ }
+
+ if !reflect.DeepEqual(resp, expected) {
+ t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expected, resp)
+ }
+
+ // Populate and test B entries
+ req = logical.TestRequest(t, logical.UpdateOperation, "bar")
+ req.ClientToken = clientTokenB
+ storageB = req.Storage
+ req.Data["raw"] = "baz"
+
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "bar")
+ req.Storage = storageB
+ req.ClientToken = clientTokenB
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ expected = &logical.Response{
+ Data: map[string]interface{}{
+ "raw": "baz",
+ },
+ }
+
+ if !reflect.DeepEqual(resp, expected) {
+ t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expected, resp)
+ }
+
+ // We shouldn't be able to read A from B and vice versa
+ req = logical.TestRequest(t, logical.ReadOperation, "foo")
+ req.Storage = storageB
+ req.ClientToken = clientTokenB
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("err: was able to read from other user's cubbyhole")
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "bar")
+ req.Storage = storageA
+ req.ClientToken = clientTokenA
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("err: was able to read from other user's cubbyhole")
+ }
+}
+
+func testCubbyholeBackend() logical.Backend {
+ b, _ := CubbyholeBackendFactory(&logical.BackendConfig{
+ Logger: nil,
+ System: logical.StaticSystemView{
+ DefaultLeaseTTLVal: time.Hour * 24,
+ MaxLeaseTTLVal: time.Hour * 24 * 32,
+ },
+ })
+ return b
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go b/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go
new file mode 100644
index 0000000..eb52a3f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go
@@ -0,0 +1,236 @@
+package vault
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// PassthroughBackendFactory returns a PassthroughBackend
+// with leases switched off
+func PassthroughBackendFactory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return LeaseSwitchedPassthroughBackend(conf, false)
+}
+
+// LeasedPassthroughBackendFactory returns a PassthroughBackend
+// with leases switched on
+func LeasedPassthroughBackendFactory(conf *logical.BackendConfig) (logical.Backend, error) {
+ return LeaseSwitchedPassthroughBackend(conf, true)
+}
+
+// LeaseSwitchedPassthroughBackend returns a PassthroughBackend
+// with leases switched on or off
+func LeaseSwitchedPassthroughBackend(conf *logical.BackendConfig, leases bool) (logical.Backend, error) {
+ var b PassthroughBackend
+ b.generateLeases = leases
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(passthroughHelp),
+
+ Paths: []*framework.Path{
+ &framework.Path{
+ Pattern: ".*",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleRead,
+ logical.CreateOperation: b.handleWrite,
+ logical.UpdateOperation: b.handleWrite,
+ logical.DeleteOperation: b.handleDelete,
+ logical.ListOperation: b.handleList,
+ },
+
+ ExistenceCheck: b.handleExistenceCheck,
+
+ HelpSynopsis: strings.TrimSpace(passthroughHelpSynopsis),
+ HelpDescription: strings.TrimSpace(passthroughHelpDescription),
+ },
+ },
+ }
+
+ b.Backend.Secrets = []*framework.Secret{
+ &framework.Secret{
+ Type: "generic",
+
+ Renew: b.handleRead,
+ Revoke: b.handleRevoke,
+ },
+ }
+
+ if conf == nil {
+ return nil, fmt.Errorf("Configuration passed into backend is nil")
+ }
+ b.Backend.Setup(conf)
+
+ return &b, nil
+}
+
+// PassthroughBackend is used for storing secrets directly into the physical
+// backend. The secrets are encrypted in the durable storage and custom TTL
+// information can be specified, but otherwise this backend doesn't do anything
+// fancy.
+type PassthroughBackend struct {
+ *framework.Backend
+ generateLeases bool
+}
+
+func (b *PassthroughBackend) handleRevoke(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // This is a no-op
+ return nil, nil
+}
+
+func (b *PassthroughBackend) handleExistenceCheck(
+ req *logical.Request, data *framework.FieldData) (bool, error) {
+ out, err := req.Storage.Get(req.Path)
+ if err != nil {
+ return false, fmt.Errorf("existence check failed: %v", err)
+ }
+
+ return out != nil, nil
+}
+
+func (b *PassthroughBackend) handleRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Read the path
+ out, err := req.Storage.Get(req.Path)
+ if err != nil {
+ return nil, fmt.Errorf("read failed: %v", err)
+ }
+
+ // Fast-path the no data case
+ if out == nil {
+ return nil, nil
+ }
+
+ // Decode the data
+ var rawData map[string]interface{}
+
+ if err := jsonutil.DecodeJSON(out.Value, &rawData); err != nil {
+ return nil, fmt.Errorf("json decoding failed: %v", err)
+ }
+
+ var resp *logical.Response
+ if b.generateLeases {
+ // Generate the response
+ resp = b.Secret("generic").Response(rawData, nil)
+ resp.Secret.Renewable = false
+ } else {
+ resp = &logical.Response{
+ Secret: &logical.Secret{},
+ Data: rawData,
+ }
+ }
+
+ // Check if there is a ttl key
+ ttl, _ := rawData["ttl"].(string)
+ if len(ttl) == 0 {
+ ttl, _ = rawData["lease"].(string)
+ }
+ ttlDuration := b.System().DefaultLeaseTTL()
+ if len(ttl) != 0 {
+ dur, err := parseutil.ParseDurationSecond(ttl)
+ if err == nil {
+ ttlDuration = dur
+ }
+
+ if b.generateLeases {
+ resp.Secret.Renewable = true
+ }
+ }
+
+ resp.Secret.TTL = ttlDuration
+
+ return resp, nil
+}
+
+func (b *PassthroughBackend) handleWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Check that some fields are given
+ if len(req.Data) == 0 {
+ return logical.ErrorResponse("missing data fields"), nil
+ }
+
+ // JSON encode the data
+ buf, err := json.Marshal(req.Data)
+ if err != nil {
+ return nil, fmt.Errorf("json encoding failed: %v", err)
+ }
+
+ // Write out a new key
+ entry := &logical.StorageEntry{
+ Key: req.Path,
+ Value: buf,
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, fmt.Errorf("failed to write: %v", err)
+ }
+
+ return nil, nil
+}
+
+func (b *PassthroughBackend) handleDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Delete the key at the request path
+ if err := req.Storage.Delete(req.Path); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *PassthroughBackend) handleList(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Right now we only handle directories, so ensure it ends with /; however,
+ // some physical backends may not handle the "/" case properly, so only add
+ // it if we're not listing the root
+ path := req.Path
+ if path != "" && !strings.HasSuffix(path, "/") {
+ path = path + "/"
+ }
+
+ // List the keys at the prefix given by the request
+ keys, err := req.Storage.List(path)
+ if err != nil {
+ return nil, err
+ }
+
+ // Generate the response
+ return logical.ListResponse(keys), nil
+}
+
+// GeneratesLeases reports whether this backend hands out leases
+func (b *PassthroughBackend) GeneratesLeases() bool {
+ return b.generateLeases
+}
+
+const passthroughHelp = `
+The generic backend reads and writes arbitrary secrets to the backend.
+The secrets are encrypted/decrypted by Vault: they are never stored
+unencrypted in the backend and the backend never has an opportunity to
+see the unencrypted value.
+
+TTLs can be set on a per-secret basis. These TTLs will be sent down
+when that secret is read, and it is assumed that some outside process will
+revoke and/or replace the secret at that path.
+`
+
+const passthroughHelpSynopsis = `
+Pass-through secret storage to the storage backend, allowing you to
+read/write arbitrary data into secret storage.
+`
+
+const passthroughHelpDescription = `
+The pass-through backend reads and writes arbitrary data into secret storage,
+encrypting it along the way.
+
+A TTL can be specified when writing with the "ttl" field. If given, the
+duration of leases returned by this backend will be set to this value. This
+can be used as a hint from the writer of a secret to the consumer of a secret
+that the consumer should re-read the value before the TTL has expired.
+However, any revocation must be handled by the user of this backend; the lease
+duration does not affect the provided data in any way.
+`
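
The only dynamic behavior in this backend is the TTL hint in `handleRead`: a `ttl` (or legacy `lease`) field stored alongside the secret is parsed on read and surfaced as `resp.Secret.TTL`, while the data itself is returned untouched. A sketch under the same assumptions as the cubbyhole example above (`logical.InmemStorage` assumed available); `passthroughTTLHint` is hypothetical.

```go
package vault

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/logical"
)

// passthroughTTLHint (hypothetical) stores a secret with a "ttl" field
// and shows it resurface as the lease TTL on read.
func passthroughTTLHint() (time.Duration, error) {
	b, err := LeaseSwitchedPassthroughBackend(&logical.BackendConfig{
		System: logical.StaticSystemView{
			DefaultLeaseTTLVal: 24 * time.Hour,
			MaxLeaseTTLVal:     32 * 24 * time.Hour,
		},
	}, true) // leases on, as in LeasedPassthroughBackendFactory
	if err != nil {
		return 0, err
	}

	storage := &logical.InmemStorage{} // assumed in-memory logical.Storage
	if _, err := b.HandleRequest(&logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "secret/foo",
		Data:      map[string]interface{}{"raw": "test", "ttl": "1h"},
		Storage:   storage,
	}); err != nil {
		return 0, err
	}

	resp, err := b.HandleRequest(&logical.Request{
		Operation: logical.ReadOperation,
		Path:      "secret/foo",
		Storage:   storage,
	})
	if err != nil {
		return 0, err
	}
	if resp == nil || resp.Secret == nil {
		return 0, fmt.Errorf("unexpected empty response")
	}
	return resp.Secret.TTL, nil // 1h, parsed from the stored "ttl" field
}
```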
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go b/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go
new file mode 100644
index 0000000..bd33d65
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go
@@ -0,0 +1,208 @@
+package vault
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestPassthroughBackend_RootPaths(t *testing.T) {
+ b := testPassthroughBackend()
+ test := func(b logical.Backend) {
+ root := b.SpecialPaths()
+ if root != nil {
+ t.Fatalf("unexpected: %v", root)
+ }
+ }
+ test(b)
+ b = testPassthroughLeasedBackend()
+ test(b)
+}
+
+func TestPassthroughBackend_Write(t *testing.T) {
+ test := func(b logical.Backend) {
+ req := logical.TestRequest(t, logical.UpdateOperation, "foo")
+ req.Data["raw"] = "test"
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ out, err := req.Storage.Get("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("failed to write to view")
+ }
+ }
+ b := testPassthroughBackend()
+ test(b)
+ b = testPassthroughLeasedBackend()
+ test(b)
+}
+
+func TestPassthroughBackend_Read(t *testing.T) {
+ test := func(b logical.Backend, ttlType string, leased bool) {
+ req := logical.TestRequest(t, logical.UpdateOperation, "foo")
+ req.Data["raw"] = "test"
+ req.Data[ttlType] = "1h"
+ storage := req.Storage
+
+ if _, err := b.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "foo")
+ req.Storage = storage
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ expected := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ TTL: time.Hour,
+ },
+ },
+ Data: map[string]interface{}{
+ "raw": "test",
+ ttlType: "1h",
+ },
+ }
+
+ if !leased {
+ expected.Secret.Renewable = false
+ }
+ resp.Secret.InternalData = nil
+ resp.Secret.LeaseID = ""
+ if !reflect.DeepEqual(resp, expected) {
+ t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expected, resp)
+ }
+ }
+ b := testPassthroughLeasedBackend()
+ test(b, "lease", true)
+ test(b, "ttl", true)
+ b = testPassthroughBackend()
+ test(b, "lease", false)
+ test(b, "ttl", false)
+}
+
+func TestPassthroughBackend_Delete(t *testing.T) {
+ test := func(b logical.Backend) {
+ req := logical.TestRequest(t, logical.UpdateOperation, "foo")
+ req.Data["raw"] = "test"
+ storage := req.Storage
+
+ if _, err := b.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = logical.TestRequest(t, logical.DeleteOperation, "foo")
+ req.Storage = storage
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "foo")
+ req.Storage = storage
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+ }
+ b := testPassthroughBackend()
+ test(b)
+ b = testPassthroughLeasedBackend()
+ test(b)
+}
+
+func TestPassthroughBackend_List(t *testing.T) {
+ test := func(b logical.Backend) {
+ req := logical.TestRequest(t, logical.UpdateOperation, "foo")
+ req.Data["raw"] = "test"
+ storage := req.Storage
+
+ if _, err := b.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = logical.TestRequest(t, logical.ListOperation, "")
+ req.Storage = storage
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ expected := &logical.Response{
+ Data: map[string]interface{}{
+ "keys": []string{"foo"},
+ },
+ }
+
+ if !reflect.DeepEqual(resp, expected) {
+ t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expected, resp)
+ }
+ }
+ b := testPassthroughBackend()
+ test(b)
+ b = testPassthroughLeasedBackend()
+ test(b)
+}
+
+func TestPassthroughBackend_Revoke(t *testing.T) {
+ test := func(b logical.Backend) {
+ req := logical.TestRequest(t, logical.RevokeOperation, "generic")
+ req.Secret = &logical.Secret{
+ InternalData: map[string]interface{}{
+ "secret_type": "generic",
+ },
+ }
+
+ if _, err := b.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+ b := testPassthroughBackend()
+ test(b)
+ b = testPassthroughLeasedBackend()
+ test(b)
+}
+
+func testPassthroughBackend() logical.Backend {
+ b, _ := PassthroughBackendFactory(&logical.BackendConfig{
+ Logger: nil,
+ System: logical.StaticSystemView{
+ DefaultLeaseTTLVal: time.Hour * 24,
+ MaxLeaseTTLVal: time.Hour * 24 * 32,
+ },
+ })
+ return b
+}
+
+func testPassthroughLeasedBackend() logical.Backend {
+ b, _ := LeasedPassthroughBackendFactory(&logical.BackendConfig{
+ Logger: nil,
+ System: logical.StaticSystemView{
+ DefaultLeaseTTLVal: time.Hour * 24,
+ MaxLeaseTTLVal: time.Hour * 24 * 32,
+ },
+ })
+ return b
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system.go b/vendor/github.com/hashicorp/vault/vault/logical_system.go
new file mode 100644
index 0000000..5fdf312
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/logical_system.go
@@ -0,0 +1,2545 @@
+package vault
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/mitchellh/mapstructure"
+)
+
+var (
+ // protectedPaths cannot be accessed via the raw APIs.
+ // This is both for security and to prevent disrupting Vault.
+ protectedPaths = []string{
+ "core",
+ }
+
+ replicationPaths = func(b *SystemBackend) []*framework.Path {
+ return []*framework.Path{
+ &framework.Path{
+ Pattern: "replication/status",
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var state consts.ReplicationState
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "mode": state.String(),
+ },
+ }
+ return resp, nil
+ },
+ },
+ },
+ }
+ }
+)
+
+func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backend, error) {
+ b := &SystemBackend{
+ Core: core,
+ }
+
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(sysHelpRoot),
+
+ PathsSpecial: &logical.Paths{
+ Root: []string{
+ "auth/*",
+ "remount",
+ "audit",
+ "audit/*",
+ "raw/*",
+ "replication/primary/secondary-token",
+ "replication/reindex",
+ "rotate",
+ "config/auditing/*",
+ "revoke-prefix/*",
+ "leases/revoke-prefix/*",
+ "leases/revoke-force/*",
+ "leases/lookup/*",
+ },
+
+ Unauthenticated: []string{
+ "wrapping/pubkey",
+ "replication/status",
+ },
+ },
+
+ Paths: []*framework.Path{
+ &framework.Path{
+ Pattern: "capabilities-accessor$",
+
+ Fields: map[string]*framework.FieldSchema{
+ "accessor": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Accessor of the token for which capabilities are being queried.",
+ },
+ "path": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Path on which capabilities are being queried.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleCapabilitiesAccessor,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["capabilities_accessor"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["capabilities_accessor"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "capabilities$",
+
+ Fields: map[string]*framework.FieldSchema{
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token for which capabilities are being queried.",
+ },
+ "path": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Path on which capabilities are being queried.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleCapabilities,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["capabilities"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["capabilities"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "capabilities-self$",
+
+ Fields: map[string]*framework.FieldSchema{
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token for which capabilities are being queried.",
+ },
+ "path": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Path on which capabilities are being queried.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleCapabilities,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["capabilities_self"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["capabilities_self"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "generate-root(/attempt)?$",
+ HelpSynopsis: strings.TrimSpace(sysHelp["generate-root"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["generate-root"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "init$",
+ HelpSynopsis: strings.TrimSpace(sysHelp["init"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["init"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "rekey/backup$",
+
+ Fields: map[string]*framework.FieldSchema{},
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleRekeyRetrieveBarrier,
+ logical.DeleteOperation: b.handleRekeyDeleteBarrier,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["rekey_backup"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["rekey_backup"][0]),
+ },
+
+ &framework.Path{
+ Pattern: "rekey/recovery-key-backup$",
+
+ Fields: map[string]*framework.FieldSchema{},
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleRekeyRetrieveRecovery,
+ logical.DeleteOperation: b.handleRekeyDeleteRecovery,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["rekey_backup"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["rekey_backup"][0]),
+ },
+
+ &framework.Path{
+ Pattern: "auth/(?P.+?)/tune$",
+ Fields: map[string]*framework.FieldSchema{
+ "path": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["auth_tune"][0]),
+ },
+ "default_lease_ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["tune_default_lease_ttl"][0]),
+ },
+ "max_lease_ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]),
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleAuthTuneRead,
+ logical.UpdateOperation: b.handleAuthTuneWrite,
+ },
+ HelpSynopsis: strings.TrimSpace(sysHelp["auth_tune"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["auth_tune"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "mounts/(?P.+?)/tune$",
+
+ Fields: map[string]*framework.FieldSchema{
+ "path": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["mount_path"][0]),
+ },
+ "default_lease_ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["tune_default_lease_ttl"][0]),
+ },
+ "max_lease_ttl": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleMountTuneRead,
+ logical.UpdateOperation: b.handleMountTuneWrite,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["mount_tune"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["mount_tune"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "mounts/(?P.+?)",
+
+ Fields: map[string]*framework.FieldSchema{
+ "path": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["mount_path"][0]),
+ },
+ "type": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["mount_type"][0]),
+ },
+ "description": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["mount_desc"][0]),
+ },
+ "config": &framework.FieldSchema{
+ Type: framework.TypeMap,
+ Description: strings.TrimSpace(sysHelp["mount_config"][0]),
+ },
+ "local": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: strings.TrimSpace(sysHelp["mount_local"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleMount,
+ logical.DeleteOperation: b.handleUnmount,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["mount"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["mount"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "mounts$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleMountTable,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["mounts"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["mounts"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "remount",
+
+ Fields: map[string]*framework.FieldSchema{
+ "from": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The previous mount point.",
+ },
+ "to": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "The new mount point.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleRemount,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["remount"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["remount"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "leases/lookup/(?P.+?)?",
+
+ Fields: map[string]*framework.FieldSchema{
+ "prefix": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["leases-list-prefix"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.handleLeaseLookupList,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["leases"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["leases"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "leases/lookup",
+
+ Fields: map[string]*framework.FieldSchema{
+ "lease_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["lease_id"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleLeaseLookup,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["leases"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["leases"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "(leases/)?renew" + framework.OptionalParamRegex("url_lease_id"),
+
+ Fields: map[string]*framework.FieldSchema{
+ "url_lease_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["lease_id"][0]),
+ },
+ "lease_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["lease_id"][0]),
+ },
+ "increment": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: strings.TrimSpace(sysHelp["increment"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleRenew,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["renew"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["renew"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "(leases/)?revoke" + framework.OptionalParamRegex("url_lease_id"),
+
+ Fields: map[string]*framework.FieldSchema{
+ "url_lease_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["lease_id"][0]),
+ },
+ "lease_id": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["lease_id"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleRevoke,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["revoke"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["revoke"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "(leases/)?revoke-force/(?P.+)",
+
+ Fields: map[string]*framework.FieldSchema{
+ "prefix": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["revoke-force-path"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleRevokeForce,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["revoke-force"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["revoke-force"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "(leases/)?revoke-prefix/(?P.+)",
+
+ Fields: map[string]*framework.FieldSchema{
+ "prefix": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["revoke-prefix-path"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleRevokePrefix,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["revoke-prefix"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["revoke-prefix"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "auth$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleAuthTable,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["auth-table"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["auth-table"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "auth/(?P.+)",
+
+ Fields: map[string]*framework.FieldSchema{
+ "path": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["auth_path"][0]),
+ },
+ "type": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["auth_type"][0]),
+ },
+ "description": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["auth_desc"][0]),
+ },
+ "local": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: strings.TrimSpace(sysHelp["mount_local"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleEnableAuth,
+ logical.DeleteOperation: b.handleDisableAuth,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["auth"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["auth"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "policy$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handlePolicyList,
+ logical.ListOperation: b.handlePolicyList,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["policy-list"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["policy-list"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "policy/(?P.+)",
+
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["policy-name"][0]),
+ },
+ "rules": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["policy-rules"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handlePolicyRead,
+ logical.UpdateOperation: b.handlePolicySet,
+ logical.DeleteOperation: b.handlePolicyDelete,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["policy"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["policy"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "seal-status$",
+ HelpSynopsis: strings.TrimSpace(sysHelp["seal-status"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["seal-status"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "seal$",
+ HelpSynopsis: strings.TrimSpace(sysHelp["seal"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["seal"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "unseal$",
+ HelpSynopsis: strings.TrimSpace(sysHelp["unseal"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["unseal"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "audit-hash/(?P.+)",
+
+ Fields: map[string]*framework.FieldSchema{
+ "path": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["audit_path"][0]),
+ },
+
+ "input": &framework.FieldSchema{
+ Type: framework.TypeString,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleAuditHash,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["audit-hash"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["audit-hash"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "audit$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleAuditTable,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["audit-table"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["audit-table"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "audit/(?P.+)",
+
+ Fields: map[string]*framework.FieldSchema{
+ "path": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["audit_path"][0]),
+ },
+ "type": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["audit_type"][0]),
+ },
+ "description": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["audit_desc"][0]),
+ },
+ "options": &framework.FieldSchema{
+ Type: framework.TypeMap,
+ Description: strings.TrimSpace(sysHelp["audit_opts"][0]),
+ },
+ "local": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: strings.TrimSpace(sysHelp["mount_local"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleEnableAudit,
+ logical.DeleteOperation: b.handleDisableAudit,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["audit"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["audit"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "raw/(?P.+)",
+
+ Fields: map[string]*framework.FieldSchema{
+ "path": &framework.FieldSchema{
+ Type: framework.TypeString,
+ },
+ "value": &framework.FieldSchema{
+ Type: framework.TypeString,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleRawRead,
+ logical.UpdateOperation: b.handleRawWrite,
+ logical.DeleteOperation: b.handleRawDelete,
+ },
+ },
+
+ &framework.Path{
+ Pattern: "key-status$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleKeyStatus,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["key-status"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["key-status"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "rotate$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleRotate,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["rotate"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["rotate"][1]),
+ },
+
+ /*
+ // Disabled for the moment as we don't support this externally
+ &framework.Path{
+ Pattern: "wrapping/pubkey$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleWrappingPubkey,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["wrappubkey"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["wrappubkey"][1]),
+ },
+ */
+
+ &framework.Path{
+ Pattern: "wrapping/wrap$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleWrappingWrap,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["wrap"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["wrap"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "wrapping/unwrap$",
+
+ Fields: map[string]*framework.FieldSchema{
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleWrappingUnwrap,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["unwrap"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["unwrap"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "wrapping/lookup$",
+
+ Fields: map[string]*framework.FieldSchema{
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleWrappingLookup,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["wraplookup"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["wraplookup"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "wrapping/rewrap$",
+
+ Fields: map[string]*framework.FieldSchema{
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleWrappingRewrap,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["rewrap"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["rewrap"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "config/auditing/request-headers/(?P.+)",
+
+ Fields: map[string]*framework.FieldSchema{
+ "header": &framework.FieldSchema{
+ Type: framework.TypeString,
+ },
+ "hmac": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleAuditedHeaderUpdate,
+ logical.DeleteOperation: b.handleAuditedHeaderDelete,
+ logical.ReadOperation: b.handleAuditedHeaderRead,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["audited-headers-name"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["audited-headers-name"][1]),
+ },
+
+ &framework.Path{
+ Pattern: "config/auditing/request-headers$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleAuditedHeadersRead,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["audited-headers"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["audited-headers"][1]),
+ },
+ },
+ }
+
+ b.Backend.Paths = append(b.Backend.Paths, replicationPaths(b)...)
+
+ b.Backend.Invalidate = b.invalidate
+
+ return b.Backend.Setup(config)
+}
+
+// SystemBackend implements logical.Backend and is used to interact with
+// the core of the system. This backend is hardcoded to exist at the "sys"
+// prefix. Conceptually it is similar to procfs on Linux.
+type SystemBackend struct {
+ Core *Core
+ Backend *framework.Backend
+}
+
+func (b *SystemBackend) invalidate(key string) {
+ if b.Core.logger.IsTrace() {
+ b.Core.logger.Trace("sys: invaliding key", "key", key)
+ }
+ switch {
+ case strings.HasPrefix(key, policySubPath):
+ b.Core.stateLock.RLock()
+ defer b.Core.stateLock.RUnlock()
+ if b.Core.policyStore != nil {
+ b.Core.policyStore.invalidate(strings.TrimPrefix(key, policySubPath))
+ }
+ }
+}
+
+// handleAuditedHeaderUpdate creates or overwrites a header entry
+func (b *SystemBackend) handleAuditedHeaderUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ header := d.Get("header").(string)
+ hmac := d.Get("hmac").(bool)
+ if header == "" {
+ return logical.ErrorResponse("missing header name"), nil
+ }
+
+ headerConfig := b.Core.AuditedHeadersConfig()
+ err := headerConfig.add(header, hmac)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// handleAuditedHeaderDelete deletes the header with the given name
+func (b *SystemBackend) handleAuditedHeaderDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ header := d.Get("header").(string)
+ if header == "" {
+ return logical.ErrorResponse("missing header name"), nil
+ }
+
+ headerConfig := b.Core.AuditedHeadersConfig()
+ err := headerConfig.remove(header)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// handleAuditedHeaderRead returns the header configuration for the given header name
+func (b *SystemBackend) handleAuditedHeaderRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ header := d.Get("header").(string)
+ if header == "" {
+ return logical.ErrorResponse("missing header name"), nil
+ }
+
+ headerConfig := b.Core.AuditedHeadersConfig()
+ settings, ok := headerConfig.Headers[header]
+ if !ok {
+ return logical.ErrorResponse("Could not find header in config"), nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ header: settings,
+ },
+ }, nil
+}
+
+// handleAuditedHeadersRead returns the whole audited headers config
+func (b *SystemBackend) handleAuditedHeadersRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ headerConfig := b.Core.AuditedHeadersConfig()
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "headers": headerConfig.Headers,
+ },
+ }, nil
+}
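+
+// Editorial sketch (hypothetical requests, mirroring the path definitions
+// above): the audited-header handlers compose into a small CRUD flow over
+// config/auditing/request-headers, with b the backend from NewSystemBackend:
+//
+//	req := logical.TestRequest(t, logical.UpdateOperation,
+//		"config/auditing/request-headers/X-Forwarded-For")
+//	req.Data["hmac"] = true
+//	b.HandleRequest(req) // handleAuditedHeaderUpdate
+//
+//	req = logical.TestRequest(t, logical.ReadOperation,
+//		"config/auditing/request-headers/X-Forwarded-For")
+//	resp, _ := b.HandleRequest(req) // resp.Data["X-Forwarded-For"]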
+
+// handleCapabilities returns the ACL capabilities of the token for a given path
+func (b *SystemBackend) handleCapabilities(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ token := d.Get("token").(string)
+ if token == "" {
+ token = req.ClientToken
+ }
+ capabilities, err := b.Core.Capabilities(token, d.Get("path").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "capabilities": capabilities,
+ },
+ }, nil
+}
+
+// handleCapabilitiesAccessor returns the ACL capabilities of the token associated
+// with the given accessor for a given path.
+func (b *SystemBackend) handleCapabilitiesAccessor(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ accessor := d.Get("accessor").(string)
+ if accessor == "" {
+ return logical.ErrorResponse("missing accessor"), nil
+ }
+
+ aEntry, err := b.Core.tokenStore.lookupByAccessor(accessor)
+ if err != nil {
+ return nil, err
+ }
+
+ capabilities, err := b.Core.Capabilities(aEntry.TokenID, d.Get("path").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "capabilities": capabilities,
+ },
+ }, nil
+}
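+
+// Editorial sketch (hypothetical accessor value): capability lookup by
+// accessor resolves the accessor to a token ID first, so callers never need
+// the token itself:
+//
+//	req := logical.TestRequest(t, logical.UpdateOperation, "capabilities-accessor")
+//	req.Data["accessor"] = "2c84f488-2133-4ced-87b0-570f93a76830"
+//	req.Data["path"] = "secret/foo"
+//	resp, _ := b.HandleRequest(req) // resp.Data["capabilities"]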
+
+// handleRekeyRetrieve returns backed-up, PGP-encrypted unseal keys from a
+// rekey operation
+func (b *SystemBackend) handleRekeyRetrieve(
+ req *logical.Request,
+ data *framework.FieldData,
+ recovery bool) (*logical.Response, error) {
+ backup, err := b.Core.RekeyRetrieveBackup(recovery)
+ if err != nil {
+ return nil, fmt.Errorf("unable to look up backed-up keys: %v", err)
+ }
+ if backup == nil {
+ return logical.ErrorResponse("no backed-up keys found"), nil
+ }
+
+ keysB64 := map[string][]string{}
+ for k, v := range backup.Keys {
+ for _, j := range v {
+ currB64Keys := keysB64[k]
+ if currB64Keys == nil {
+ currB64Keys = []string{}
+ }
+ key, err := hex.DecodeString(j)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding hex-encoded backup key: %v", err)
+ }
+ currB64Keys = append(currB64Keys, base64.StdEncoding.EncodeToString(key))
+ keysB64[k] = currB64Keys
+ }
+ }
+
+ // Format the status
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "nonce": backup.Nonce,
+ "keys": backup.Keys,
+ "keys_base64": keysB64,
+ },
+ }
+
+ return resp, nil
+}
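+
+// Editorial note: the loop above re-encodes each backed-up key from hex to
+// base64; both encodings end up in the response. For a single (made-up) key
+// the conversion reduces to:
+//
+//	raw, err := hex.DecodeString("deadbeef")
+//	if err != nil {
+//		return nil, err
+//	}
+//	b64 := base64.StdEncoding.EncodeToString(raw) // "3q2+7w=="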
+
+func (b *SystemBackend) handleRekeyRetrieveBarrier(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return b.handleRekeyRetrieve(req, data, false)
+}
+
+func (b *SystemBackend) handleRekeyRetrieveRecovery(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return b.handleRekeyRetrieve(req, data, true)
+}
+
+// handleRekeyDelete deletes backed-up, PGP-encrypted unseal keys from a rekey
+// operation
+func (b *SystemBackend) handleRekeyDelete(
+ req *logical.Request,
+ data *framework.FieldData,
+ recovery bool) (*logical.Response, error) {
+ err := b.Core.RekeyDeleteBackup(recovery)
+ if err != nil {
+ return nil, fmt.Errorf("error during deletion of backed-up keys: %v", err)
+ }
+
+ return nil, nil
+}
+
+func (b *SystemBackend) handleRekeyDeleteBarrier(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return b.handleRekeyDelete(req, data, false)
+}
+
+func (b *SystemBackend) handleRekeyDeleteRecovery(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return b.handleRekeyDelete(req, data, true)
+}
+
+// handleMountTable handles the "mounts" endpoint to provide the mount table
+func (b *SystemBackend) handleMountTable(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.Core.mountsLock.RLock()
+ defer b.Core.mountsLock.RUnlock()
+
+ resp := &logical.Response{
+ Data: make(map[string]interface{}),
+ }
+
+ for _, entry := range b.Core.mounts.Entries {
+ info := map[string]interface{}{
+ "type": entry.Type,
+ "description": entry.Description,
+ "config": map[string]interface{}{
+ "default_lease_ttl": int64(entry.Config.DefaultLeaseTTL.Seconds()),
+ "max_lease_ttl": int64(entry.Config.MaxLeaseTTL.Seconds()),
+ "force_no_cache": entry.Config.ForceNoCache,
+ },
+ "local": entry.Local,
+ }
+
+ resp.Data[entry.Path] = info
+ }
+
+ return resp, nil
+}
+
+// handleMount is used to mount a new path
+func (b *SystemBackend) handleMount(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.Core.clusterParamsLock.RLock()
+ repState := b.Core.replicationState
+ b.Core.clusterParamsLock.RUnlock()
+
+ local := data.Get("local").(bool)
+ if !local && repState == consts.ReplicationSecondary {
+ return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
+ }
+
+ // Get all the options
+ path := data.Get("path").(string)
+ logicalType := data.Get("type").(string)
+ description := data.Get("description").(string)
+
+ path = sanitizeMountPath(path)
+
+ var config MountConfig
+
+ var apiConfig struct {
+ DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
+ }
+ configMap := data.Get("config").(map[string]interface{})
+ if configMap != nil && len(configMap) != 0 {
+ err := mapstructure.Decode(configMap, &apiConfig)
+ if err != nil {
+ return logical.ErrorResponse(
+ "unable to convert given mount config information"),
+ logical.ErrInvalidRequest
+ }
+ }
+
+ switch apiConfig.DefaultLeaseTTL {
+ case "":
+ case "system":
+ default:
+ tmpDef, err := parseutil.ParseDurationSecond(apiConfig.DefaultLeaseTTL)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "unable to parse default TTL of %s: %s", apiConfig.DefaultLeaseTTL, err)),
+ logical.ErrInvalidRequest
+ }
+ config.DefaultLeaseTTL = tmpDef
+ }
+
+ switch apiConfig.MaxLeaseTTL {
+ case "":
+ case "system":
+ default:
+ tmpMax, err := parseutil.ParseDurationSecond(apiConfig.MaxLeaseTTL)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "unable to parse max TTL of %s: %s", apiConfig.MaxLeaseTTL, err)),
+ logical.ErrInvalidRequest
+ }
+ config.MaxLeaseTTL = tmpMax
+ }
+
+ if config.MaxLeaseTTL != 0 && config.DefaultLeaseTTL > config.MaxLeaseTTL {
+ return logical.ErrorResponse(
+ "given default lease TTL greater than given max lease TTL"),
+ logical.ErrInvalidRequest
+ }
+
+ if config.DefaultLeaseTTL > b.Core.maxLeaseTTL {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "given default lease TTL greater than system max lease TTL of %d", int(b.Core.maxLeaseTTL.Seconds()))),
+ logical.ErrInvalidRequest
+ }
+
+ // Copy over the force no cache if set
+ if apiConfig.ForceNoCache {
+ config.ForceNoCache = true
+ }
+
+ if logicalType == "" {
+ return logical.ErrorResponse(
+ "backend type must be specified as a string"),
+ logical.ErrInvalidRequest
+ }
+
+ // Create the mount entry
+ me := &MountEntry{
+ Table: mountTableType,
+ Path: path,
+ Type: logicalType,
+ Description: description,
+ Config: config,
+ Local: local,
+ }
+
+ // Attempt mount
+ if err := b.Core.mount(me); err != nil {
+ b.Backend.Logger().Error("sys: mount failed", "path", me.Path, "error", err)
+ return handleError(err)
+ }
+
+ return nil, nil
+}
+
+// handleError intercepts an HTTPCodedError so it goes back to the caller
+// with its status code intact; any other error is normalized to an
+// invalid request error.
+func handleError(err error) (*logical.Response, error) {
+ switch err.(type) {
+ case logical.HTTPCodedError:
+ return logical.ErrorResponse(err.Error()), err
+ default:
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+}
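+
+// Editorial sketch: the distinction above matters to the HTTP layer. Assuming
+// logical.CodedError constructs an HTTPCodedError, a coded error keeps its
+// status while anything else collapses to an invalid request:
+//
+//	resp, err := handleError(logical.CodedError(409, "conflict"))
+//	// err is the original HTTPCodedError; its status code is preserved
+//
+//	resp, err = handleError(fmt.Errorf("boom"))
+//	// err == logical.ErrInvalidRequest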
+
+// handleUnmount is used to unmount a path
+func (b *SystemBackend) handleUnmount(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.Core.clusterParamsLock.RLock()
+ repState := b.Core.replicationState
+ b.Core.clusterParamsLock.RUnlock()
+
+ suffix := strings.TrimPrefix(req.Path, "mounts/")
+ if len(suffix) == 0 {
+ return logical.ErrorResponse("path cannot be blank"), logical.ErrInvalidRequest
+ }
+
+ suffix = sanitizeMountPath(suffix)
+
+ entry := b.Core.router.MatchingMountEntry(suffix)
+ if entry != nil && !entry.Local && repState == consts.ReplicationSecondary {
+ return logical.ErrorResponse("cannot unmount a non-local mount on a replication secondary"), nil
+ }
+
+ // Attempt unmount
+ if existed, err := b.Core.unmount(suffix); existed && err != nil {
+ b.Backend.Logger().Error("sys: unmount failed", "path", suffix, "error", err)
+ return handleError(err)
+ }
+
+ return nil, nil
+}
+
+// handleRemount is used to remount a path
+func (b *SystemBackend) handleRemount(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.Core.clusterParamsLock.RLock()
+ repState := b.Core.replicationState
+ b.Core.clusterParamsLock.RUnlock()
+
+ // Get the paths
+ fromPath := data.Get("from").(string)
+ toPath := data.Get("to").(string)
+ if fromPath == "" || toPath == "" {
+ return logical.ErrorResponse(
+ "both 'from' and 'to' path must be specified as a string"),
+ logical.ErrInvalidRequest
+ }
+
+ fromPath = sanitizeMountPath(fromPath)
+ toPath = sanitizeMountPath(toPath)
+
+ entry := b.Core.router.MatchingMountEntry(fromPath)
+ if entry != nil && !entry.Local && repState == consts.ReplicationSecondary {
+ return logical.ErrorResponse("cannot remount a non-local mount on a replication secondary"), nil
+ }
+
+ // Attempt remount
+ if err := b.Core.remount(fromPath, toPath); err != nil {
+ b.Backend.Logger().Error("sys: remount failed", "from_path", fromPath, "to_path", toPath, "error", err)
+ return handleError(err)
+ }
+
+ return nil, nil
+}
+
+// handleAuthTuneRead is used to get config settings on an auth path
+func (b *SystemBackend) handleAuthTuneRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ path := data.Get("path").(string)
+ if path == "" {
+ return logical.ErrorResponse(
+ "path must be specified as a string"),
+ logical.ErrInvalidRequest
+ }
+ return b.handleTuneReadCommon("auth/" + path)
+}
+
+// handleMountTuneRead is used to get config settings on a backend
+func (b *SystemBackend) handleMountTuneRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ path := data.Get("path").(string)
+ if path == "" {
+ return logical.ErrorResponse(
+ "path must be specified as a string"),
+ logical.ErrInvalidRequest
+ }
+
+ // This call reads the configuration of both logical and auth backends;
+ // the behavior is retained for backward compatibility. If it is not
+ // desired, an error can be returned when the path has an "auth/" prefix.
+ return b.handleTuneReadCommon(path)
+}
+
+// handleTuneReadCommon returns the config settings of a path
+func (b *SystemBackend) handleTuneReadCommon(path string) (*logical.Response, error) {
+ path = sanitizeMountPath(path)
+
+ sysView := b.Core.router.MatchingSystemView(path)
+ if sysView == nil {
+ b.Backend.Logger().Error("sys: cannot fetch sysview", "path", path)
+ return handleError(fmt.Errorf("sys: cannot fetch sysview for path %s", path))
+ }
+
+ mountEntry := b.Core.router.MatchingMountEntry(path)
+ if mountEntry == nil {
+ b.Backend.Logger().Error("sys: cannot fetch mount entry", "path", path)
+ return handleError(fmt.Errorf("sys: cannot fetch mount entry for path %s", path))
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "default_lease_ttl": int(sysView.DefaultLeaseTTL().Seconds()),
+ "max_lease_ttl": int(sysView.MaxLeaseTTL().Seconds()),
+ "force_no_cache": mountEntry.Config.ForceNoCache,
+ },
+ }
+
+ return resp, nil
+}
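+
+// Editorial sketch: per the handler above, reading mounts/secret/tune on an
+// untuned mount yields data of this shape (values hypothetical, in seconds):
+//
+//	{"default_lease_ttl": 2764800, "max_lease_ttl": 2764800, "force_no_cache": false}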
+
+// handleAuthTuneWrite is used to set config settings on an auth path
+func (b *SystemBackend) handleAuthTuneWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ path := data.Get("path").(string)
+ if path == "" {
+ return logical.ErrorResponse("path must be specified as a string"),
+ logical.ErrInvalidRequest
+ }
+ return b.handleTuneWriteCommon("auth/"+path, data)
+}
+
+// handleMountTuneWrite is used to set config settings on a backend
+func (b *SystemBackend) handleMountTuneWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ path := data.Get("path").(string)
+ if path == "" {
+ return logical.ErrorResponse("path must be specified as a string"),
+ logical.ErrInvalidRequest
+ }
+ // This call writes the configuration of both logical and auth backends;
+ // the behavior is retained for backward compatibility. If it is not
+ // desired, an error can be returned when the path has an "auth/" prefix.
+ return b.handleTuneWriteCommon(path, data)
+}
+
+// handleTuneWriteCommon is used to set config settings on a path
+func (b *SystemBackend) handleTuneWriteCommon(
+ path string, data *framework.FieldData) (*logical.Response, error) {
+ b.Core.clusterParamsLock.RLock()
+ repState := b.Core.replicationState
+ b.Core.clusterParamsLock.RUnlock()
+
+ path = sanitizeMountPath(path)
+
+ // Prevent protected paths from being changed
+ for _, p := range untunableMounts {
+ if strings.HasPrefix(path, p) {
+ b.Backend.Logger().Error("sys: cannot tune this mount", "path", path)
+ return handleError(fmt.Errorf("sys: cannot tune '%s'", path))
+ }
+ }
+
+ mountEntry := b.Core.router.MatchingMountEntry(path)
+ if mountEntry == nil {
+ b.Backend.Logger().Error("sys: tune failed: no mount entry found", "path", path)
+ return handleError(fmt.Errorf("sys: tune of path '%s' failed: no mount entry found", path))
+ }
+ if mountEntry != nil && !mountEntry.Local && repState == consts.ReplicationSecondary {
+ return logical.ErrorResponse("cannot tune a non-local mount on a replication secondary"), nil
+ }
+
+ var lock *sync.RWMutex
+ switch {
+ case strings.HasPrefix(path, "auth/"):
+ lock = &b.Core.authLock
+ default:
+ lock = &b.Core.mountsLock
+ }
+
+ // Timing configuration parameters
+ {
+ var newDefault, newMax *time.Duration
+ defTTL := data.Get("default_lease_ttl").(string)
+ switch defTTL {
+ case "":
+ case "system":
+ tmpDef := time.Duration(0)
+ newDefault = &tmpDef
+ default:
+ tmpDef, err := parseutil.ParseDurationSecond(defTTL)
+ if err != nil {
+ return handleError(err)
+ }
+ newDefault = &tmpDef
+ }
+
+ maxTTL := data.Get("max_lease_ttl").(string)
+ switch maxTTL {
+ case "":
+ case "system":
+ tmpMax := time.Duration(0)
+ newMax = &tmpMax
+ default:
+ tmpMax, err := parseutil.ParseDurationSecond(maxTTL)
+ if err != nil {
+ return handleError(err)
+ }
+ newMax = &tmpMax
+ }
+
+ if newDefault != nil || newMax != nil {
+ lock.Lock()
+ defer lock.Unlock()
+
+ if err := b.tuneMountTTLs(path, mountEntry, newDefault, newMax); err != nil {
+ b.Backend.Logger().Error("sys: tuning failed", "path", path, "error", err)
+ return handleError(err)
+ }
+ }
+ }
+
+ return nil, nil
+}
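+
+// Editorial sketch: per the switches above, the literal "system" maps to a
+// zero duration (interpreted downstream as "use the system value"), while ""
+// leaves the current setting untouched. A hypothetical tune request:
+//
+//	req := logical.TestRequest(t, logical.UpdateOperation, "mounts/secret/tune")
+//	req.Data["default_lease_ttl"] = "768h"
+//	req.Data["max_lease_ttl"] = "system" // reset max TTL to the system value
+//	b.HandleRequest(req)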
+
+// handleLeaseLookup is used to view the metadata for a given LeaseID
+func (b *SystemBackend) handleLeaseLookup(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ leaseID := data.Get("lease_id").(string)
+ if leaseID == "" {
+ return logical.ErrorResponse("lease_id must be specified"),
+ logical.ErrInvalidRequest
+ }
+
+ leaseTimes, err := b.Core.expiration.FetchLeaseTimes(leaseID)
+ if err != nil {
+ b.Backend.Logger().Error("sys: error retrieving lease", "lease_id", leaseID, "error", err)
+ return handleError(err)
+ }
+ if leaseTimes == nil {
+ return logical.ErrorResponse("invalid lease"), logical.ErrInvalidRequest
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "id": leaseID,
+ "issue_time": leaseTimes.IssueTime,
+ "expire_time": nil,
+ "last_renewal": nil,
+ "ttl": int64(0),
+ },
+ }
+ renewable, _ := leaseTimes.renewable()
+ resp.Data["renewable"] = renewable
+
+ if !leaseTimes.LastRenewalTime.IsZero() {
+ resp.Data["last_renewal"] = leaseTimes.LastRenewalTime
+ }
+ if !leaseTimes.ExpireTime.IsZero() {
+ resp.Data["expire_time"] = leaseTimes.ExpireTime
+ resp.Data["ttl"] = leaseTimes.ttl()
+ }
+ return resp, nil
+}
+
+func (b *SystemBackend) handleLeaseLookupList(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ prefix := data.Get("prefix").(string)
+ if prefix != "" && !strings.HasSuffix(prefix, "/") {
+ prefix = prefix + "/"
+ }
+
+ keys, err := b.Core.expiration.idView.List(prefix)
+ if err != nil {
+ b.Backend.Logger().Error("sys: error listing leases", "prefix", prefix, "error", err)
+ return handleError(err)
+ }
+ return logical.ListResponse(keys), nil
+}
+
+// handleRenew is used to renew a lease with a given LeaseID
+func (b *SystemBackend) handleRenew(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Get all the options
+ leaseID := data.Get("lease_id").(string)
+ if leaseID == "" {
+ leaseID = data.Get("url_lease_id").(string)
+ }
+ if leaseID == "" {
+ return logical.ErrorResponse("lease_id must be specified"),
+ logical.ErrInvalidRequest
+ }
+ incrementRaw := data.Get("increment").(int)
+
+ // Convert the increment
+ increment := time.Duration(incrementRaw) * time.Second
+
+ // Invoke the expiration manager directly
+ resp, err := b.Core.expiration.Renew(leaseID, increment)
+ if err != nil {
+ b.Backend.Logger().Error("sys: lease renewal failed", "lease_id", leaseID, "error", err)
+ return handleError(err)
+ }
+ return resp, err
+}
+
+// handleRevoke is used to revoke a given LeaseID
+func (b *SystemBackend) handleRevoke(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Get all the options
+ leaseID := data.Get("lease_id").(string)
+ if leaseID == "" {
+ leaseID = data.Get("url_lease_id").(string)
+ }
+ if leaseID == "" {
+ return logical.ErrorResponse("lease_id must be specified"),
+ logical.ErrInvalidRequest
+ }
+
+ // Invoke the expiration manager directly
+ if err := b.Core.expiration.Revoke(leaseID); err != nil {
+ b.Backend.Logger().Error("sys: lease revocation failed", "lease_id", leaseID, "error", err)
+ return handleError(err)
+ }
+ return nil, nil
+}
+
+// handleRevokePrefix is used to revoke a prefix with many LeaseIDs
+func (b *SystemBackend) handleRevokePrefix(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return b.handleRevokePrefixCommon(req, data, false)
+}
+
+// handleRevokeForce is used to revoke a prefix with many LeaseIDs, ignoring errors
+func (b *SystemBackend) handleRevokeForce(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return b.handleRevokePrefixCommon(req, data, true)
+}
+
+// handleRevokePrefixCommon is used to revoke a prefix with many LeaseIDs
+func (b *SystemBackend) handleRevokePrefixCommon(
+ req *logical.Request, data *framework.FieldData, force bool) (*logical.Response, error) {
+ // Get all the options
+ prefix := data.Get("prefix").(string)
+
+ // Invoke the expiration manager directly
+ var err error
+ if force {
+ err = b.Core.expiration.RevokeForce(prefix)
+ } else {
+ err = b.Core.expiration.RevokePrefix(prefix)
+ }
+ if err != nil {
+ b.Backend.Logger().Error("sys: revoke prefix failed", "prefix", prefix, "error", err)
+ return handleError(err)
+ }
+ return nil, nil
+}
+
+// handleAuthTable handles the "auth" endpoint to provide the auth table
+func (b *SystemBackend) handleAuthTable(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.Core.authLock.RLock()
+ defer b.Core.authLock.RUnlock()
+
+ resp := &logical.Response{
+ Data: make(map[string]interface{}),
+ }
+ for _, entry := range b.Core.auth.Entries {
+ info := map[string]interface{}{
+ "type": entry.Type,
+ "description": entry.Description,
+ "config": map[string]interface{}{
+ "default_lease_ttl": int64(entry.Config.DefaultLeaseTTL.Seconds()),
+ "max_lease_ttl": int64(entry.Config.MaxLeaseTTL.Seconds()),
+ },
+ "local": entry.Local,
+ }
+ resp.Data[entry.Path] = info
+ }
+ return resp, nil
+}
+
+// handleEnableAuth is used to enable a new credential backend
+func (b *SystemBackend) handleEnableAuth(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.Core.clusterParamsLock.RLock()
+ repState := b.Core.replicationState
+ b.Core.clusterParamsLock.RUnlock()
+
+ local := data.Get("local").(bool)
+ if !local && repState == consts.ReplicationSecondary {
+ return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
+ }
+
+ // Get all the options
+ path := data.Get("path").(string)
+ logicalType := data.Get("type").(string)
+ description := data.Get("description").(string)
+
+ if logicalType == "" {
+ return logical.ErrorResponse(
+ "backend type must be specified as a string"),
+ logical.ErrInvalidRequest
+ }
+
+ path = sanitizeMountPath(path)
+
+ // Create the mount entry
+ me := &MountEntry{
+ Table: credentialTableType,
+ Path: path,
+ Type: logicalType,
+ Description: description,
+ Local: local,
+ }
+
+ // Attempt enabling
+ if err := b.Core.enableCredential(me); err != nil {
+ b.Backend.Logger().Error("sys: enable auth mount failed", "path", me.Path, "error", err)
+ return handleError(err)
+ }
+ return nil, nil
+}
+
+// handleDisableAuth is used to disable a credential backend
+func (b *SystemBackend) handleDisableAuth(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ suffix := strings.TrimPrefix(req.Path, "auth/")
+ if len(suffix) == 0 {
+ return logical.ErrorResponse("path cannot be blank"), logical.ErrInvalidRequest
+ }
+
+ suffix = sanitizeMountPath(suffix)
+
+ // Attempt disable
+ if existed, err := b.Core.disableCredential(suffix); existed && err != nil {
+ b.Backend.Logger().Error("sys: disable auth mount failed", "path", suffix, "error", err)
+ return handleError(err)
+ }
+ return nil, nil
+}
+
+// handlePolicyList handles the "policy" endpoint to provide the enabled policies
+func (b *SystemBackend) handlePolicyList(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Get all the configured policies
+ policies, err := b.Core.policyStore.ListPolicies()
+
+ // Add the special "root" policy
+ policies = append(policies, "root")
+ resp := logical.ListResponse(policies)
+
+ // Backwards compatibility
+ resp.Data["policies"] = resp.Data["keys"]
+
+ return resp, err
+}
+
+// handlePolicyRead handles the "policy/" endpoint to read a policy
+func (b *SystemBackend) handlePolicyRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+
+ policy, err := b.Core.policyStore.GetPolicy(name)
+ if err != nil {
+ return handleError(err)
+ }
+
+ if policy == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "name": name,
+ "rules": policy.Raw,
+ },
+ }, nil
+}
+
+// handlePolicySet handles the "policy/" endpoint to set a policy
+func (b *SystemBackend) handlePolicySet(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+ rules := data.Get("rules").(string)
+
+ // Validate the rules parse
+ parse, err := Parse(rules)
+ if err != nil {
+ return handleError(err)
+ }
+
+ // Override the name
+ parse.Name = strings.ToLower(name)
+
+ // Update the policy
+ if err := b.Core.policyStore.SetPolicy(parse); err != nil {
+ return handleError(err)
+ }
+ return nil, nil
+}
+
+// handlePolicyDelete handles the "policy/" endpoint to delete a policy
+func (b *SystemBackend) handlePolicyDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+
+ if err := b.Core.policyStore.DeletePolicy(name); err != nil {
+ return handleError(err)
+ }
+ return nil, nil
+}
+
+// handleAuditTable handles the "audit" endpoint to provide the audit table
+func (b *SystemBackend) handleAuditTable(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.Core.auditLock.RLock()
+ defer b.Core.auditLock.RUnlock()
+
+ resp := &logical.Response{
+ Data: make(map[string]interface{}),
+ }
+ for _, entry := range b.Core.audit.Entries {
+ info := map[string]interface{}{
+ "path": entry.Path,
+ "type": entry.Type,
+ "description": entry.Description,
+ "options": entry.Options,
+ "local": entry.Local,
+ }
+ resp.Data[entry.Path] = info
+ }
+ return resp, nil
+}
+
+// handleAuditHash is used to fetch the hash of the given input data with the
+// specified audit backend's salt
+func (b *SystemBackend) handleAuditHash(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ path := data.Get("path").(string)
+ input := data.Get("input").(string)
+ if input == "" {
+ return logical.ErrorResponse("the \"input\" parameter is empty"), nil
+ }
+
+ path = sanitizeMountPath(path)
+
+ hash, err := b.Core.auditBroker.GetHash(path, input)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "hash": hash,
+ },
+ }, nil
+}
+
+// handleEnableAudit is used to enable a new audit backend
+func (b *SystemBackend) handleEnableAudit(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.Core.clusterParamsLock.RLock()
+ repState := b.Core.replicationState
+ b.Core.clusterParamsLock.RUnlock()
+
+ local := data.Get("local").(bool)
+ if !local && repState == consts.ReplicationSecondary {
+ return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
+ }
+
+ // Get all the options
+ path := data.Get("path").(string)
+ backendType := data.Get("type").(string)
+ description := data.Get("description").(string)
+ options := data.Get("options").(map[string]interface{})
+
+ optionMap := make(map[string]string)
+ for k, v := range options {
+ vStr, ok := v.(string)
+ if !ok {
+ return logical.ErrorResponse("options must be string valued"),
+ logical.ErrInvalidRequest
+ }
+ optionMap[k] = vStr
+ }
+
+ // Create the mount entry
+ me := &MountEntry{
+ Table: auditTableType,
+ Path: path,
+ Type: backendType,
+ Description: description,
+ Options: optionMap,
+ Local: local,
+ }
+
+ // Attempt enabling
+ if err := b.Core.enableAudit(me); err != nil {
+ b.Backend.Logger().Error("sys: enable audit mount failed", "path", me.Path, "error", err)
+ return handleError(err)
+ }
+ return nil, nil
+}
+
+// handleDisableAudit is used to disable an audit backend
+func (b *SystemBackend) handleDisableAudit(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ path := data.Get("path").(string)
+
+ // Attempt disable
+ if existed, err := b.Core.disableAudit(path); existed && err != nil {
+ b.Backend.Logger().Error("sys: disable audit mount failed", "path", path, "error", err)
+ return handleError(err)
+ }
+ return nil, nil
+}
+
+// handleRawRead is used to read directly from the barrier
+func (b *SystemBackend) handleRawRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ path := data.Get("path").(string)
+
+ // Prevent access of protected paths
+ for _, p := range protectedPaths {
+ if strings.HasPrefix(path, p) {
+ err := fmt.Sprintf("cannot read '%s'", path)
+ return logical.ErrorResponse(err), logical.ErrInvalidRequest
+ }
+ }
+
+ entry, err := b.Core.barrier.Get(path)
+ if err != nil {
+ return handleError(err)
+ }
+ if entry == nil {
+ return nil, nil
+ }
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "value": string(entry.Value),
+ },
+ }
+ return resp, nil
+}
+
+// handleRawWrite is used to write directly to the barrier
+func (b *SystemBackend) handleRawWrite(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ path := data.Get("path").(string)
+
+ // Prevent access of protected paths
+ for _, p := range protectedPaths {
+ if strings.HasPrefix(path, p) {
+ err := fmt.Sprintf("cannot write '%s'", path)
+ return logical.ErrorResponse(err), logical.ErrInvalidRequest
+ }
+ }
+
+ value := data.Get("value").(string)
+ entry := &Entry{
+ Key: path,
+ Value: []byte(value),
+ }
+ if err := b.Core.barrier.Put(entry); err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+ return nil, nil
+}
+
+// handleRawDelete is used to delete directly from the barrier
+func (b *SystemBackend) handleRawDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ path := data.Get("path").(string)
+
+ // Prevent access of protected paths
+ for _, p := range protectedPaths {
+ if strings.HasPrefix(path, p) {
+ err := fmt.Sprintf("cannot delete '%s'", path)
+ return logical.ErrorResponse(err), logical.ErrInvalidRequest
+ }
+ }
+
+ if err := b.Core.barrier.Delete(path); err != nil {
+ return handleError(err)
+ }
+ return nil, nil
+}
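+
+// Editorial sketch: all three raw handlers share the protectedPaths guard, so
+// a (hypothetical) write under "core" is rejected before the barrier is ever
+// touched:
+//
+//	req := logical.TestRequest(t, logical.UpdateOperation, "raw/core/keyring")
+//	req.Data["value"] = "x"
+//	resp, err := b.HandleRequest(req) // err == logical.ErrInvalidRequest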
+
+// handleKeyStatus returns status information about the backend key
+func (b *SystemBackend) handleKeyStatus(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Get the key info
+ info, err := b.Core.barrier.ActiveKeyInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "term": info.Term,
+ "install_time": info.InstallTime.Format(time.RFC3339Nano),
+ },
+ }
+ return resp, nil
+}
+
+// handleRotate is used to trigger a key rotation
+func (b *SystemBackend) handleRotate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.Core.clusterParamsLock.RLock()
+ repState := b.Core.replicationState
+ b.Core.clusterParamsLock.RUnlock()
+ if repState == consts.ReplicationSecondary {
+ return logical.ErrorResponse("cannot rotate on a replication secondary"), nil
+ }
+
+ // Rotate to the new term
+ newTerm, err := b.Core.barrier.Rotate()
+ if err != nil {
+ b.Backend.Logger().Error("sys: failed to create new encryption key", "error", err)
+ return handleError(err)
+ }
+ b.Backend.Logger().Info("sys: installed new encryption key")
+
+	// In HA mode, we need to create an upgrade path for the standby instances
+ if b.Core.ha != nil {
+ // Create the upgrade path to the new term
+ if err := b.Core.barrier.CreateUpgrade(newTerm); err != nil {
+ b.Backend.Logger().Error("sys: failed to create new upgrade", "term", newTerm, "error", err)
+ }
+
+ // Schedule the destroy of the upgrade path
+ time.AfterFunc(keyRotateGracePeriod, func() {
+ if err := b.Core.barrier.DestroyUpgrade(newTerm); err != nil {
+ b.Backend.Logger().Error("sys: failed to destroy upgrade", "term", newTerm, "error", err)
+ }
+ })
+ }
+
+ // Write to the canary path, which will force a synchronous truing during
+ // replication
+ if err := b.Core.barrier.Put(&Entry{
+ Key: coreKeyringCanaryPath,
+ Value: []byte(fmt.Sprintf("new-rotation-term-%d", newTerm)),
+ }); err != nil {
+ b.Core.logger.Error("core: error saving keyring canary", "error", err)
+ return nil, fmt.Errorf("failed to save keyring canary: %v", err)
+ }
+
+ return nil, nil
+}
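
After installing the new key term, the handler creates an upgrade path for HA standbys and uses `time.AfterFunc` to destroy it once `keyRotateGracePeriod` has elapsed. A toy sketch of that fire-and-forget cleanup pattern (the grace period below is an arbitrary stand-in for the real constant):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Arbitrary stand-in for keyRotateGracePeriod, which is defined
	// elsewhere in the vault package.
	const gracePeriod = 100 * time.Millisecond

	newTerm := 2
	fmt.Printf("upgrade path created for term %d\n", newTerm)

	done := make(chan struct{})
	// Schedule the cleanup the way handleRotate does: the timer fires
	// once, on its own goroutine, after the grace period elapses.
	time.AfterFunc(gracePeriod, func() {
		fmt.Printf("upgrade path for term %d destroyed\n", newTerm)
		close(done)
	})

	<-done // block so the demo doesn't exit before the timer fires
}
```
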
+
+func (b *SystemBackend) handleWrappingPubkey(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ x, _ := b.Core.wrappingJWTKey.X.MarshalText()
+ y, _ := b.Core.wrappingJWTKey.Y.MarshalText()
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "jwt_x": string(x),
+ "jwt_y": string(y),
+ "jwt_curve": corePrivateKeyTypeP521,
+ },
+ }, nil
+}
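
The pubkey handler text-marshals the X and Y coordinates (both `*big.Int`) of the cluster's P-521 JWT signing key. A self-contained sketch with a throwaway key, showing that `MarshalText` yields the decimal string form of each coordinate:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	// Throwaway P-521 key standing in for Core.wrappingJWTKey.
	key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// big.Int implements encoding.TextMarshaler; X and Y serialize to
	// decimal strings, which is what the handler returns as jwt_x/jwt_y.
	x, _ := key.X.MarshalText()
	y, _ := key.Y.MarshalText()
	fmt.Printf("jwt_x=%s\njwt_y=%s\n", x, y)
}
```
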
+
+func (b *SystemBackend) handleWrappingWrap(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ if req.WrapInfo == nil || req.WrapInfo.TTL == 0 {
+ return logical.ErrorResponse("endpoint requires response wrapping to be used"), logical.ErrInvalidRequest
+ }
+
+ // N.B.: Do *NOT* allow JWT wrapping tokens to be created through this
+ // endpoint. JWTs are signed so if we don't allow users to create wrapping
+	// tokens using them we can ensure that an operator can't spoof a legitimate JWT
+ // wrapped token, which makes certain init/rekey/generate-root cases have
+ // better properties.
+ req.WrapInfo.Format = "uuid"
+
+ return &logical.Response{
+ Data: data.Raw,
+ }, nil
+}
+
+func (b *SystemBackend) handleWrappingUnwrap(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // If a third party is unwrapping (rather than the calling token being the
+ // wrapping token) we detect this so that we can revoke the original
+ // wrapping token after reading it
+ var thirdParty bool
+
+ token := data.Get("token").(string)
+ if token != "" {
+ thirdParty = true
+ } else {
+ token = req.ClientToken
+ }
+
+ if thirdParty {
+ // Use the token to decrement the use count to avoid a second operation on the token.
+ _, err := b.Core.tokenStore.UseTokenByID(token)
+ if err != nil {
+ return nil, fmt.Errorf("error decrementing wrapping token's use-count: %v", err)
+ }
+
+ defer b.Core.tokenStore.Revoke(token)
+ }
+
+ cubbyReq := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "cubbyhole/response",
+ ClientToken: token,
+ }
+ cubbyResp, err := b.Core.router.Route(cubbyReq)
+ if err != nil {
+ return nil, fmt.Errorf("error looking up wrapping information: %v", err)
+ }
+ if cubbyResp == nil {
+ return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
+ }
+ if cubbyResp != nil && cubbyResp.IsError() {
+ return cubbyResp, nil
+ }
+ if cubbyResp.Data == nil {
+ return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
+ }
+
+ responseRaw := cubbyResp.Data["response"]
+ if responseRaw == nil {
+ return nil, fmt.Errorf("no response found inside the cubbyhole")
+ }
+ response, ok := responseRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("could not decode response inside the cubbyhole")
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{},
+ }
+ if len(response) == 0 {
+ resp.Data[logical.HTTPStatusCode] = 204
+ } else {
+ resp.Data[logical.HTTPStatusCode] = 200
+ resp.Data[logical.HTTPRawBody] = []byte(response)
+ resp.Data[logical.HTTPContentType] = "application/json"
+ }
+
+ return resp, nil
+}
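
The tail of the unwrap handler maps the stored payload straight onto an HTTP response: an empty string becomes a body-less 204, anything else a 200 carrying the raw JSON. A sketch of that branch in plain `net/http` terms (the helper name is invented for illustration):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

// writeUnwrapped mimics the status-code selection at the end of
// handleWrappingUnwrap: empty payload -> 204, otherwise raw JSON -> 200.
func writeUnwrapped(w http.ResponseWriter, response string) {
	if len(response) == 0 {
		w.WriteHeader(http.StatusNoContent)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write([]byte(response)) // implicit 200
}

func main() {
	for _, payload := range []string{"", `{"foo":"bar"}`} {
		rec := httptest.NewRecorder()
		writeUnwrapped(rec, payload)
		body, _ := ioutil.ReadAll(rec.Result().Body)
		fmt.Println(rec.Code, string(body))
	}
	// 204
	// 200 {"foo":"bar"}
}
```
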
+
+func (b *SystemBackend) handleWrappingLookup(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ token := data.Get("token").(string)
+
+ if token == "" {
+ return logical.ErrorResponse("missing \"token\" value in input"), logical.ErrInvalidRequest
+ }
+
+ cubbyReq := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "cubbyhole/wrapinfo",
+ ClientToken: token,
+ }
+ cubbyResp, err := b.Core.router.Route(cubbyReq)
+ if err != nil {
+ return nil, fmt.Errorf("error looking up wrapping information: %v", err)
+ }
+ if cubbyResp == nil {
+ return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
+ }
+ if cubbyResp != nil && cubbyResp.IsError() {
+ return cubbyResp, nil
+ }
+ if cubbyResp.Data == nil {
+ return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
+ }
+
+ creationTTLRaw := cubbyResp.Data["creation_ttl"]
+ creationTime := cubbyResp.Data["creation_time"]
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{},
+ }
+ if creationTTLRaw != nil {
+ creationTTL, err := creationTTLRaw.(json.Number).Int64()
+ if err != nil {
+ return nil, fmt.Errorf("error reading creation_ttl value from wrapping information: %v", err)
+ }
+ resp.Data["creation_ttl"] = time.Duration(creationTTL).Seconds()
+ }
+ if creationTime != nil {
+ // This was JSON marshaled so it's already a string in RFC3339 format
+ resp.Data["creation_time"] = cubbyResp.Data["creation_time"]
+ }
+
+ return resp, nil
+}
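
`creation_ttl` arrives from the cubbyhole as a `json.Number`: the stored value is a JSON-marshaled `time.Duration`, i.e. a count of nanoseconds, which is why the handler wraps it in `time.Duration(...)` before calling `Seconds()`. A standalone demo of that decode path, assuming the JSON was decoded with `UseNumber`:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// A JSON-marshaled time.Duration is a plain number of nanoseconds;
	// 300 * time.Second marshals as 300000000000.
	raw := []byte(`{"creation_ttl": 300000000000}`)

	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.UseNumber() // keep numbers as json.Number instead of float64

	var data map[string]interface{}
	if err := dec.Decode(&data); err != nil {
		panic(err)
	}

	ns, err := data["creation_ttl"].(json.Number).Int64()
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Duration(ns).Seconds()) // 300
}
```
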
+
+func (b *SystemBackend) handleWrappingRewrap(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // If a third party is rewrapping (rather than the calling token being the
+ // wrapping token) we detect this so that we can revoke the original
+ // wrapping token after reading it. Right now wrapped tokens can't unwrap
+ // themselves, but in case we change it, this will be ready to do the right
+ // thing.
+ var thirdParty bool
+
+ token := data.Get("token").(string)
+ if token != "" {
+ thirdParty = true
+ } else {
+ token = req.ClientToken
+ }
+
+ if thirdParty {
+ // Use the token to decrement the use count to avoid a second operation on the token.
+ _, err := b.Core.tokenStore.UseTokenByID(token)
+ if err != nil {
+ return nil, fmt.Errorf("error decrementing wrapping token's use-count: %v", err)
+ }
+ defer b.Core.tokenStore.Revoke(token)
+ }
+
+ // Fetch the original TTL
+ cubbyReq := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "cubbyhole/wrapinfo",
+ ClientToken: token,
+ }
+ cubbyResp, err := b.Core.router.Route(cubbyReq)
+ if err != nil {
+ return nil, fmt.Errorf("error looking up wrapping information: %v", err)
+ }
+ if cubbyResp == nil {
+ return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
+ }
+ if cubbyResp != nil && cubbyResp.IsError() {
+ return cubbyResp, nil
+ }
+ if cubbyResp.Data == nil {
+ return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
+ }
+
+ // Set the creation TTL on the request
+ creationTTLRaw := cubbyResp.Data["creation_ttl"]
+ if creationTTLRaw == nil {
+ return nil, fmt.Errorf("creation_ttl value in wrapping information was nil")
+ }
+ creationTTL, err := cubbyResp.Data["creation_ttl"].(json.Number).Int64()
+ if err != nil {
+ return nil, fmt.Errorf("error reading creation_ttl value from wrapping information: %v", err)
+ }
+
+ // Fetch the original response and return it as the data for the new response
+ cubbyReq = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "cubbyhole/response",
+ ClientToken: token,
+ }
+ cubbyResp, err = b.Core.router.Route(cubbyReq)
+ if err != nil {
+ return nil, fmt.Errorf("error looking up response: %v", err)
+ }
+ if cubbyResp == nil {
+ return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
+ }
+ if cubbyResp != nil && cubbyResp.IsError() {
+ return cubbyResp, nil
+ }
+ if cubbyResp.Data == nil {
+ return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
+ }
+
+ response := cubbyResp.Data["response"]
+ if response == nil {
+ return nil, fmt.Errorf("no response found inside the cubbyhole")
+ }
+
+ // Return response in "response"; wrapping code will detect the rewrap and
+ // slot in instead of nesting
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "response": response,
+ },
+ WrapInfo: &logical.ResponseWrapInfo{
+ TTL: time.Duration(creationTTL),
+ },
+ }, nil
+}
+
+func sanitizeMountPath(path string) string {
+ if !strings.HasSuffix(path, "/") {
+ path += "/"
+ }
+
+ if strings.HasPrefix(path, "/") {
+ path = path[1:]
+ }
+
+ return path
+}
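
sanitizeMountPath normalizes a user-supplied mount path to the canonical `name/` form: append a trailing slash if missing, strip a single leading slash. A tiny standalone demo (the function is inlined so the snippet runs on its own):

```go
package main

import (
	"fmt"
	"strings"
)

// Inlined copy of sanitizeMountPath so the sketch is self-contained.
func sanitizeMountPath(path string) string {
	if !strings.HasSuffix(path, "/") {
		path += "/"
	}
	if strings.HasPrefix(path, "/") {
		path = path[1:]
	}
	return path
}

func main() {
	// All four spellings normalize to "secret/".
	for _, p := range []string{"secret", "/secret", "secret/", "/secret/"} {
		fmt.Printf("%q -> %q\n", p, sanitizeMountPath(p))
	}
}
```
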
+
+const sysHelpRoot = `
+The system backend is built-in to Vault and cannot be remounted or
+unmounted. It contains the paths that are used to configure Vault itself
+as well as perform core operations.
+`
+
+// sysHelp is all the help text for the sys backend.
+var sysHelp = map[string][2]string{
+ "init": {
+ "Initializes or returns the initialization status of the Vault.",
+ `
+This path responds to the following HTTP methods.
+
+ GET /
+ Returns the initialization status of the Vault.
+
+ POST /
+ Initializes a new vault.
+ `,
+ },
+ "generate-root": {
+ "Reads, generates, or deletes a root token regeneration process.",
+ `
+This path responds to the following HTTP methods.
+
+ GET /attempt
+ Reads the configuration and progress of the current root generation
+ attempt.
+
+ POST /attempt
+ Initializes a new root generation attempt. Only a single root generation
+ attempt can take place at a time. One (and only one) of otp or pgp_key
+ are required.
+
+ DELETE /attempt
+ Cancels any in-progress root generation attempt. This clears any
+ progress made. This must be called to change the OTP or PGP key being
+ used.
+ `,
+ },
+ "seal-status": {
+ "Returns the seal status of the Vault.",
+ `
+This path responds to the following HTTP methods.
+
+ GET /
+ Returns the seal status of the Vault. This is an unauthenticated
+ endpoint.
+ `,
+ },
+ "seal": {
+ "Seals the Vault.",
+ `
+This path responds to the following HTTP methods.
+
+ PUT /
+ Seals the Vault.
+ `,
+ },
+ "unseal": {
+ "Unseals the Vault.",
+ `
+This path responds to the following HTTP methods.
+
+ PUT /
+ Unseals the Vault.
+ `,
+ },
+ "mounts": {
+ "List the currently mounted backends.",
+ `
+This path responds to the following HTTP methods.
+
+ GET /
+ Lists all the mounted secret backends.
+
+    GET /<mount point>
+        Get information about the mount at the specified path.
+
+    POST /<mount point>
+        Mount a new secret backend to the mount point in the URL.
+
+    POST /<mount point>/tune
+        Tune configuration parameters for the given mount point.
+
+    DELETE /<mount point>
+ Unmount the specified mount point.
+ `,
+ },
+
+ "mount": {
+ `Mount a new backend at a new path.`,
+ `
+Mount a backend at a new path. A backend can be mounted multiple times at
+multiple paths, with each mount being a separately configured instance.
+Example: you might have an AWS backend for the east coast, and one for the
+west coast.
+ `,
+ },
+
+ "mount_path": {
+ `The path to mount to. Example: "aws/east"`,
+ "",
+ },
+
+ "mount_type": {
+ `The type of the backend. Example: "passthrough"`,
+ "",
+ },
+
+ "mount_desc": {
+ `User-friendly description for this mount.`,
+ "",
+ },
+
+ "mount_config": {
+ `Configuration for this mount, such as default_lease_ttl
+and max_lease_ttl.`,
+ },
+
+ "mount_local": {
+ `Mark the mount as a local mount, which is not replicated
+and is unaffected by replication.`,
+ },
+
+ "tune_default_lease_ttl": {
+ `The default lease TTL for this mount.`,
+ },
+
+ "tune_max_lease_ttl": {
+ `The max lease TTL for this mount.`,
+ },
+
+ "remount": {
+ "Move the mount point of an already-mounted backend.",
+ `
+This path responds to the following HTTP methods.
+
+ POST /sys/remount
+ Changes the mount point of an already-mounted backend.
+ `,
+ },
+
+ "auth_tune": {
+ "Tune the configuration parameters for an auth path.",
+		`Read and write the 'default_lease_ttl' and 'max_lease_ttl' values of
+the auth path.`,
+ },
+
+ "mount_tune": {
+ "Tune backend configuration parameters for this mount.",
+		`Read and write the 'default_lease_ttl' and 'max_lease_ttl' values of
+the mount.`,
+ },
+
+ "renew": {
+ "Renew a lease on a secret",
+ `
+When a secret is read, it may optionally include a lease interval
+and a boolean indicating if renew is possible. For secrets that support
+lease renewal, this endpoint is used to extend the validity of the
+lease and to prevent an automatic revocation.
+ `,
+ },
+
+ "lease_id": {
+ "The lease identifier to renew. This is included with a lease.",
+ "",
+ },
+
+ "increment": {
+ "The desired increment in seconds to the lease",
+ "",
+ },
+
+ "revoke": {
+ "Revoke a leased secret immediately",
+ `
+When a secret is generated with a lease, it is automatically revoked
+at the end of the lease period if not renewed. However, in some cases
+you may want to force an immediate revocation. This endpoint can be
+used to revoke the secret with the given Lease ID.
+ `,
+ },
+
+ "revoke-prefix": {
+ "Revoke all secrets generated in a given prefix",
+ `
+Revokes all the secrets generated under a given mount prefix. As
+an example, "prod/aws/" might be the AWS logical backend, and due to
+a change in the "ops" policy, we may want to invalidate all the secrets
+generated. We can do a revoke prefix at "prod/aws/ops" to revoke all
+the ops secrets. This does a prefix match on the Lease IDs and revokes
+all matching leases.
+ `,
+ },
+
+ "revoke-prefix-path": {
+ `The path to revoke keys under. Example: "prod/aws/ops"`,
+ "",
+ },
+
+ "revoke-force": {
+ "Revoke all secrets generated in a given prefix, ignoring errors.",
+ `
+See the path help for 'revoke-prefix'; this behaves the same, except that it
+ignores errors encountered during revocation. This can be used in certain
+recovery situations; for instance, when you want to unmount a backend, but it
+is impossible to fix revocation errors and these errors prevent the unmount
+from proceeding. This is a DANGEROUS operation as it removes Vault's oversight
+of external secrets. Access to this prefix should be tightly controlled.
+ `,
+ },
+
+ "revoke-force-path": {
+ `The path to revoke keys under. Example: "prod/aws/ops"`,
+ "",
+ },
+
+ "auth-table": {
+ "List the currently enabled credential backends.",
+ `
+This path responds to the following HTTP methods.
+
+ GET /
+ List the currently enabled credential backends: the name, the type of
+        the backend, and a user-friendly description of the purpose for the
+ credential backend.
+
+    POST /<mount point>
+        Enable a new auth backend.
+
+    DELETE /<mount point>
+ Disable the auth backend at the given mount point.
+ `,
+ },
+
+ "auth": {
+ `Enable a new credential backend with a name.`,
+ `
+Enable a credential mechanism at a new path. A backend can be mounted multiple
+times at multiple paths, with each mount being a separately configured instance.
+Example: you might have an OAuth backend for GitHub, and one for Google Apps.
+ `,
+ },
+
+ "auth_path": {
+ `The path to mount to. Cannot be delimited. Example: "user"`,
+ "",
+ },
+
+ "auth_type": {
+ `The type of the backend. Example: "userpass"`,
+ "",
+ },
+
+ "auth_desc": {
+		`User-friendly description for this credential backend.`,
+ "",
+ },
+
+ "policy-list": {
+ `List the configured access control policies.`,
+ `
+This path responds to the following HTTP methods.
+
+ GET /
+ List the names of the configured access control policies.
+
+    GET /<name>
+        Retrieve the rules for the named policy.
+
+    PUT /<name>
+        Add or update a policy.
+
+    DELETE /<name>
+ Delete the policy with the given name.
+ `,
+ },
+
+ "policy": {
+ `Read, Modify, or Delete an access control policy.`,
+ `
+Read the rules of an existing policy, create or update the rules of a policy,
+or delete a policy.
+ `,
+ },
+
+ "policy-name": {
+ `The name of the policy. Example: "ops"`,
+ "",
+ },
+
+ "policy-rules": {
+ `The rules of the policy. Either given in HCL or JSON format.`,
+ "",
+ },
+
+ "audit-hash": {
+ "The hash of the given string via the given audit backend",
+ "",
+ },
+
+ "audit-table": {
+ "List the currently enabled audit backends.",
+ `
+This path responds to the following HTTP methods.
+
+ GET /
+ List the currently enabled audit backends.
+
+    PUT /<path>
+        Enable an audit backend at the given path.
+
+    DELETE /<path>
+ Disable the given audit backend.
+ `,
+ },
+
+ "audit_path": {
+ `The name of the backend. Cannot be delimited. Example: "mysql"`,
+ "",
+ },
+
+ "audit_type": {
+ `The type of the backend. Example: "mysql"`,
+ "",
+ },
+
+ "audit_desc": {
+ `User-friendly description for this audit backend.`,
+ "",
+ },
+
+ "audit_opts": {
+ `Configuration options for the audit backend.`,
+ "",
+ },
+
+ "audit": {
+ `Enable or disable audit backends.`,
+ `
+Enable a new audit backend or disable an existing backend.
+ `,
+ },
+
+ "key-status": {
+ "Provides information about the backend encryption key.",
+ `
+ Provides the current backend encryption key term and installation time.
+ `,
+ },
+
+ "rotate": {
+ "Rotates the backend encryption key used to persist data.",
+ `
+ Rotate generates a new encryption key which is used to encrypt all
+ data going to the storage backend. The old encryption keys are kept so
+ that data encrypted using those keys can still be decrypted.
+ `,
+ },
+
+ "rekey_backup": {
+ "Allows fetching or deleting the backup of the rotated unseal keys.",
+ "",
+ },
+
+ "capabilities": {
+ "Fetches the capabilities of the given token on the given path.",
+		`Returns the capabilities of the given token on the path.
+        The path is matched against all the policies associated with the token.`,
+ },
+
+ "capabilities_self": {
+ "Fetches the capabilities of the given token on the given path.",
+		`Returns the capabilities of the client token on the path.
+        The path is matched against all the policies associated with the client token.`,
+ },
+
+ "capabilities_accessor": {
+ "Fetches the capabilities of the token associated with the given token, on the given path.",
+		`When there is no access to the token itself, its accessor can be used to fetch
+        the token's capabilities on a given path.`,
+ },
+
+ "wrap": {
+ "Response-wraps an arbitrary JSON object.",
+ `Round trips the given input data into a response-wrapped token.`,
+ },
+
+ "wrappubkey": {
+ "Returns pubkeys used in some wrapping formats.",
+ "Returns pubkeys used in some wrapping formats.",
+ },
+
+ "unwrap": {
+ "Unwraps a response-wrapped token.",
+ `Unwraps a response-wrapped token. Unlike simply reading from cubbyhole/response,
+ this provides additional validation on the token, and rather than a JSON-escaped
+ string, the returned response is the exact same as the contained wrapped response.`,
+ },
+
+ "wraplookup": {
+ "Looks up the properties of a response-wrapped token.",
+ `Returns the creation TTL and creation time of a response-wrapped token.`,
+ },
+
+ "rewrap": {
+ "Rotates a response-wrapped token.",
+ `Rotates a response-wrapped token; the output is a new token with the same
+ response wrapped inside and the same creation TTL. The original token is revoked.`,
+ },
+ "audited-headers-name": {
+ "Configures the headers sent to the audit logs.",
+ `
+This path responds to the following HTTP methods.
+
+    GET /<name>
+        Returns the setting for the header with the given name.
+
+    POST /<name>
+        Enable auditing of the given header.
+
+    DELETE /<name>
+ Disable auditing of the given header.
+ `,
+ },
+ "audited-headers": {
+ "Lists the headers configured to be audited.",
+ `Returns a list of headers that have been configured to be audited.`,
+ },
+
+ "leases": {
+ `View or list lease metadata.`,
+ `
+This path responds to the following HTTP methods.
+
+ PUT /
+ Retrieve the metadata for the provided lease id.
+
+    LIST /<prefix>
+ Lists the leases for the named prefix.
+ `,
+ },
+
+ "leases-list-prefix": {
+ `The path to list leases under. Example: "aws/creds/deploy"`,
+ "",
+ },
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go b/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go
new file mode 100644
index 0000000..809ebb9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go
@@ -0,0 +1,85 @@
+package vault
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// tuneMount is used to set config on a mount point
+func (b *SystemBackend) tuneMountTTLs(path string, me *MountEntry, newDefault, newMax *time.Duration) error {
+ meConfig := &me.Config
+
+ if newDefault == nil && newMax == nil {
+ return nil
+ }
+ if newDefault == nil && newMax != nil &&
+ *newMax == meConfig.MaxLeaseTTL {
+ return nil
+ }
+ if newMax == nil && newDefault != nil &&
+ *newDefault == meConfig.DefaultLeaseTTL {
+ return nil
+ }
+ if newMax != nil && newDefault != nil &&
+ *newDefault == meConfig.DefaultLeaseTTL &&
+ *newMax == meConfig.MaxLeaseTTL {
+ return nil
+ }
+
+ if newMax != nil && newDefault != nil && *newMax < *newDefault {
+ return fmt.Errorf("new backend max lease TTL of %d less than new backend default lease TTL of %d",
+ int(newMax.Seconds()), int(newDefault.Seconds()))
+ }
+
+ if newMax != nil && newDefault == nil {
+ if meConfig.DefaultLeaseTTL != 0 && *newMax < meConfig.DefaultLeaseTTL {
+ return fmt.Errorf("new backend max lease TTL of %d less than backend default lease TTL of %d",
+ int(newMax.Seconds()), int(meConfig.DefaultLeaseTTL.Seconds()))
+ }
+ }
+
+ if newDefault != nil {
+ if meConfig.MaxLeaseTTL == 0 {
+ if newMax == nil && *newDefault > b.Core.maxLeaseTTL {
+ return fmt.Errorf("new backend default lease TTL of %d greater than system max lease TTL of %d",
+ int(newDefault.Seconds()), int(b.Core.maxLeaseTTL.Seconds()))
+ }
+ } else {
+ if newMax == nil && *newDefault > meConfig.MaxLeaseTTL {
+ return fmt.Errorf("new backend default lease TTL of %d greater than backend max lease TTL of %d",
+ int(newDefault.Seconds()), int(meConfig.MaxLeaseTTL.Seconds()))
+ }
+ }
+ }
+
+ origMax := meConfig.MaxLeaseTTL
+ origDefault := meConfig.DefaultLeaseTTL
+
+ if newMax != nil {
+ meConfig.MaxLeaseTTL = *newMax
+ }
+ if newDefault != nil {
+ meConfig.DefaultLeaseTTL = *newDefault
+ }
+
+ // Update the mount table
+ var err error
+ switch {
+ case strings.HasPrefix(path, "auth/"):
+ err = b.Core.persistAuth(b.Core.auth, me.Local)
+ default:
+ err = b.Core.persistMounts(b.Core.mounts, me.Local)
+ }
+ if err != nil {
+ meConfig.MaxLeaseTTL = origMax
+ meConfig.DefaultLeaseTTL = origDefault
+ return fmt.Errorf("failed to update mount table, rolling back TTL changes")
+ }
+
+ if b.Core.logger.IsInfo() {
+ b.Core.logger.Info("core: mount tuning successful", "path", path)
+ }
+
+ return nil
+}
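
The bulk of tuneMountTTLs is enforcing one invariant across several combinations of supplied and omitted values: a mount's default lease TTL may never exceed its max lease TTL. A condensed sketch of that core check, simplified from the full rule set above (the real code additionally falls back to the system-wide max when the mount's max is zero):

```go
package main

import (
	"fmt"
	"time"
)

// checkTTLs captures the invariant tuneMountTTLs protects when both values
// are known: default must not exceed max, where a zero max means "no cap"
// in this simplified sketch.
func checkTTLs(def, max time.Duration) error {
	if max != 0 && def > max {
		return fmt.Errorf("default lease TTL of %d greater than max lease TTL of %d",
			int(def.Seconds()), int(max.Seconds()))
	}
	return nil
}

func main() {
	fmt.Println(checkTTLs(30*time.Minute, time.Hour)) // <nil>
	fmt.Println(checkTTLs(2*time.Hour, time.Hour))    // default ... greater than max ...
	fmt.Println(checkTTLs(2*time.Hour, 0))            // <nil>: zero max means no cap here
}
```
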
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_test.go b/vendor/github.com/hashicorp/vault/vault/logical_system_test.go
new file mode 100644
index 0000000..4f3f70f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/logical_system_test.go
@@ -0,0 +1,1545 @@
+package vault
+
+import (
+ "crypto/sha256"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+ "github.com/mitchellh/mapstructure"
+)
+
+func TestSystemBackend_RootPaths(t *testing.T) {
+ expected := []string{
+ "auth/*",
+ "remount",
+ "audit",
+ "audit/*",
+ "raw/*",
+ "replication/primary/secondary-token",
+ "replication/reindex",
+ "rotate",
+ "config/auditing/*",
+ "revoke-prefix/*",
+ "leases/revoke-prefix/*",
+ "leases/revoke-force/*",
+ "leases/lookup/*",
+ }
+
+ b := testSystemBackend(t)
+ actual := b.SpecialPaths().Root
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
+
+func TestSystemBackend_mounts(t *testing.T) {
+ b := testSystemBackend(t)
+ req := logical.TestRequest(t, logical.ReadOperation, "mounts")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // We can't know the pointer address ahead of time so simply
+ // copy what's given
+ exp := map[string]interface{}{
+ "secret/": map[string]interface{}{
+ "type": "generic",
+ "description": "generic secret storage",
+ "config": map[string]interface{}{
+ "default_lease_ttl": resp.Data["secret/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
+ "max_lease_ttl": resp.Data["secret/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "sys/": map[string]interface{}{
+ "type": "system",
+ "description": "system endpoints used for control, policy and debugging",
+ "config": map[string]interface{}{
+ "default_lease_ttl": resp.Data["sys/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
+ "max_lease_ttl": resp.Data["sys/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
+ "force_no_cache": false,
+ },
+ "local": false,
+ },
+ "cubbyhole/": map[string]interface{}{
+ "description": "per-token private secret storage",
+ "type": "cubbyhole",
+ "config": map[string]interface{}{
+ "default_lease_ttl": resp.Data["cubbyhole/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
+ "max_lease_ttl": resp.Data["cubbyhole/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
+ "force_no_cache": false,
+ },
+ "local": true,
+ },
+ }
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("Got:\n%#v\nExpected:\n%#v", resp.Data, exp)
+ }
+}
+
+func TestSystemBackend_mount(t *testing.T) {
+ b := testSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "mounts/prod/secret/")
+ req.Data["type"] = "generic"
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_mount_force_no_cache(t *testing.T) {
+ core, b, _ := testCoreSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "mounts/prod/secret/")
+ req.Data["type"] = "generic"
+ req.Data["config"] = map[string]interface{}{
+ "force_no_cache": true,
+ }
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ mountEntry := core.router.MatchingMountEntry("prod/secret/")
+ if mountEntry == nil {
+ t.Fatalf("missing mount entry")
+ }
+ if !mountEntry.Config.ForceNoCache {
+ t.Fatalf("bad config %#v", mountEntry)
+ }
+}
+
+func TestSystemBackend_mount_invalid(t *testing.T) {
+ b := testSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "mounts/prod/secret/")
+ req.Data["type"] = "nope"
+ resp, err := b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["error"] != "unknown backend type: nope" {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_unmount(t *testing.T) {
+ b := testSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.DeleteOperation, "mounts/secret/")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+var capabilitiesPolicy = `
+name = "test"
+path "foo/bar*" {
+ capabilities = ["create", "sudo", "update"]
+}
+path "sys/capabilities*" {
+ capabilities = ["update"]
+}
+`
+
+func TestSystemBackend_Capabilities(t *testing.T) {
+ testCapabilities(t, "capabilities")
+ testCapabilities(t, "capabilities-self")
+}
+
+func testCapabilities(t *testing.T, endpoint string) {
+ core, b, rootToken := testCoreSystemBackend(t)
+ req := logical.TestRequest(t, logical.UpdateOperation, endpoint)
+ req.Data["token"] = rootToken
+ req.Data["path"] = "any_path"
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ actual := resp.Data["capabilities"]
+ expected := []string{"root"}
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
+ }
+
+ policy, _ := Parse(capabilitiesPolicy)
+ err = core.policyStore.SetPolicy(policy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ testMakeToken(t, core.tokenStore, rootToken, "tokenid", "", []string{"test"})
+ req = logical.TestRequest(t, logical.UpdateOperation, endpoint)
+ req.Data["token"] = "tokenid"
+ req.Data["path"] = "foo/bar"
+
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ actual = resp.Data["capabilities"]
+ expected = []string{"create", "sudo", "update"}
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
+ }
+}
+
+func TestSystemBackend_CapabilitiesAccessor(t *testing.T) {
+ core, b, rootToken := testCoreSystemBackend(t)
+ te, err := core.tokenStore.Lookup(rootToken)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "capabilities-accessor")
+ // Accessor of root token
+ req.Data["accessor"] = te.Accessor
+ req.Data["path"] = "any_path"
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ actual := resp.Data["capabilities"]
+ expected := []string{"root"}
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
+ }
+
+ policy, _ := Parse(capabilitiesPolicy)
+ err = core.policyStore.SetPolicy(policy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ testMakeToken(t, core.tokenStore, rootToken, "tokenid", "", []string{"test"})
+
+ te, err = core.tokenStore.Lookup("tokenid")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req = logical.TestRequest(t, logical.UpdateOperation, "capabilities-accessor")
+ req.Data["accessor"] = te.Accessor
+ req.Data["path"] = "foo/bar"
+
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ actual = resp.Data["capabilities"]
+ expected = []string{"create", "sudo", "update"}
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
+ }
+}
+
+func TestSystemBackend_remount(t *testing.T) {
+ b := testSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "remount")
+ req.Data["from"] = "secret"
+ req.Data["to"] = "foo"
+ req.Data["config"] = structs.Map(MountConfig{})
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_remount_invalid(t *testing.T) {
+ b := testSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "remount")
+ req.Data["from"] = "unknown"
+ req.Data["to"] = "foo"
+ req.Data["config"] = structs.Map(MountConfig{})
+ resp, err := b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["error"] != "no matching mount at 'unknown/'" {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_remount_system(t *testing.T) {
+ b := testSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "remount")
+ req.Data["from"] = "sys"
+ req.Data["to"] = "foo"
+ resp, err := b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["error"] != "cannot remount 'sys/'" {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_leases(t *testing.T) {
+ core, b, root := testCoreSystemBackend(t)
+
+ // Create a key with a lease
+ req := logical.TestRequest(t, logical.UpdateOperation, "secret/foo")
+ req.Data["foo"] = "bar"
+ req.ClientToken = root
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read a key with a LeaseID
+ req = logical.TestRequest(t, logical.ReadOperation, "secret/foo")
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read lease
+ req = logical.TestRequest(t, logical.UpdateOperation, "leases/lookup")
+ req.Data["lease_id"] = resp.Secret.LeaseID
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["renewable"] == nil || resp.Data["renewable"].(bool) {
+ t.Fatal("generic leases are not renewable")
+ }
+
+ // Invalid lease
+ req = logical.TestRequest(t, logical.UpdateOperation, "leases/lookup")
+ req.Data["lease_id"] = "invalid"
+ resp, err = b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("expected invalid request, got err: %v", err)
+ }
+}
+
+func TestSystemBackend_leases_list(t *testing.T) {
+ core, b, root := testCoreSystemBackend(t)
+
+ // Create a key with a lease
+ req := logical.TestRequest(t, logical.UpdateOperation, "secret/foo")
+ req.Data["foo"] = "bar"
+ req.ClientToken = root
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read a key with a LeaseID
+ req = logical.TestRequest(t, logical.ReadOperation, "secret/foo")
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // List top level
+ req = logical.TestRequest(t, logical.ListOperation, "leases/lookup/")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Data == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+ var keys []string
+ if err := mapstructure.WeakDecode(resp.Data["keys"], &keys); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 1 {
+ t.Fatalf("Expected 1 subkey lease, got %d: %#v", len(keys), keys)
+ }
+ if keys[0] != "secret/" {
+ t.Fatal("Expected only secret subkey")
+ }
+
+ // List lease
+ req = logical.TestRequest(t, logical.ListOperation, "leases/lookup/secret/foo")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Data == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+ keys = []string{}
+ if err := mapstructure.WeakDecode(resp.Data["keys"], &keys); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 1 {
+ t.Fatalf("Expected 1 secret lease, got %d: %#v", len(keys), keys)
+ }
+
+ // Generate multiple leases
+ req = logical.TestRequest(t, logical.ReadOperation, "secret/foo")
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "secret/foo")
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.ListOperation, "leases/lookup/secret/foo")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Data == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+ keys = []string{}
+ if err := mapstructure.WeakDecode(resp.Data["keys"], &keys); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 3 {
+ t.Fatalf("Expected 3 secret lease, got %d: %#v", len(keys), keys)
+ }
+
+ // Listing subkeys
+ req = logical.TestRequest(t, logical.UpdateOperation, "secret/bar")
+ req.Data["foo"] = "bar"
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read a key with a LeaseID
+ req = logical.TestRequest(t, logical.ReadOperation, "secret/bar")
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.ListOperation, "leases/lookup/secret")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Data == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+ keys = []string{}
+ if err := mapstructure.WeakDecode(resp.Data["keys"], &keys); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 2 {
+ t.Fatalf("Expected 2 secret lease, got %d: %#v", len(keys), keys)
+ }
+ expected := []string{"bar/", "foo/"}
+ if !reflect.DeepEqual(expected, keys) {
+ t.Fatalf("exp: %#v, act: %#v", expected, keys)
+ }
+}
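
The lease tests use `mapstructure.WeakDecode` to pull the loosely typed `keys` entry out of the response data into a `[]string`. A standalone demo of that decode:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	// Response data comes back as map[string]interface{}; WeakDecode
	// copies the "keys" entry into a typed slice, applying weak type
	// conversions where needed.
	data := map[string]interface{}{
		"keys": []interface{}{"bar/", "foo/"},
	}

	var keys []string
	if err := mapstructure.WeakDecode(data["keys"], &keys); err != nil {
		panic(err)
	}
	fmt.Println(keys) // [bar/ foo/]
}
```
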
+
+func TestSystemBackend_renew(t *testing.T) {
+ core, b, root := testCoreSystemBackend(t)
+
+ // Create a key with a lease
+ req := logical.TestRequest(t, logical.UpdateOperation, "secret/foo")
+ req.Data["foo"] = "bar"
+ req.ClientToken = root
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read a key with a LeaseID
+ req = logical.TestRequest(t, logical.ReadOperation, "secret/foo")
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Attempt renew
+ req2 := logical.TestRequest(t, logical.UpdateOperation, "leases/renew/"+resp.Secret.LeaseID)
+ resp2, err := b.HandleRequest(req2)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should get error about non-renewability
+ if resp2.Data["error"] != "lease is not renewable" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Add a TTL to the lease
+ req = logical.TestRequest(t, logical.UpdateOperation, "secret/foo")
+ req.Data["foo"] = "bar"
+ req.Data["ttl"] = "180s"
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read a key with a LeaseID
+ req = logical.TestRequest(t, logical.ReadOperation, "secret/foo")
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Attempt renew
+ req2 = logical.TestRequest(t, logical.UpdateOperation, "leases/renew/"+resp.Secret.LeaseID)
+ resp2, err = b.HandleRequest(req2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp2.IsError() {
+ t.Fatalf("got an error")
+ }
+ if resp2.Data == nil {
+ t.Fatal("nil data")
+ }
+ if resp.Secret.TTL != 180*time.Second {
+ t.Fatalf("bad lease duration: %v", resp.Secret.TTL)
+ }
+
+ // Test the other route path
+ req2 = logical.TestRequest(t, logical.UpdateOperation, "leases/renew")
+ req2.Data["lease_id"] = resp.Secret.LeaseID
+ resp2, err = b.HandleRequest(req2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp2.IsError() {
+ t.Fatalf("got an error")
+ }
+ if resp2.Data == nil {
+ t.Fatal("nil data")
+ }
+ if resp.Secret.TTL != 180*time.Second {
+ t.Fatalf("bad lease duration: %v", resp.Secret.TTL)
+ }
+
+ // Test orig path
+ req2 = logical.TestRequest(t, logical.UpdateOperation, "renew")
+ req2.Data["lease_id"] = resp.Secret.LeaseID
+ resp2, err = b.HandleRequest(req2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp2.IsError() {
+ t.Fatalf("got an error")
+ }
+ if resp2.Data == nil {
+ t.Fatal("nil data")
+ }
+ if resp.Secret.TTL != 180*time.Second {
+ t.Fatalf("bad lease duration: %v", resp.Secret.TTL)
+ }
+}
+
+func TestSystemBackend_renew_invalidID(t *testing.T) {
+ b := testSystemBackend(t)
+
+ // Attempt renew
+ req := logical.TestRequest(t, logical.UpdateOperation, "leases/renew/foobarbaz")
+ resp, err := b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["error"] != "lease not found or lease is not renewable" {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ // Attempt renew with other method
+ req = logical.TestRequest(t, logical.UpdateOperation, "leases/renew")
+ req.Data["lease_id"] = "foobarbaz"
+ resp, err = b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["error"] != "lease not found or lease is not renewable" {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_renew_invalidID_origUrl(t *testing.T) {
+ b := testSystemBackend(t)
+
+ // Attempt renew
+ req := logical.TestRequest(t, logical.UpdateOperation, "renew/foobarbaz")
+ resp, err := b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["error"] != "lease not found or lease is not renewable" {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ // Attempt renew with other method
+ req = logical.TestRequest(t, logical.UpdateOperation, "renew")
+ req.Data["lease_id"] = "foobarbaz"
+ resp, err = b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["error"] != "lease not found or lease is not renewable" {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_revoke(t *testing.T) {
+ core, b, root := testCoreSystemBackend(t)
+
+ // Create a key with a lease
+ req := logical.TestRequest(t, logical.UpdateOperation, "secret/foo")
+ req.Data["foo"] = "bar"
+ req.Data["lease"] = "1h"
+ req.ClientToken = root
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read a key with a LeaseID
+ req = logical.TestRequest(t, logical.ReadOperation, "secret/foo")
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Attempt revoke
+ req2 := logical.TestRequest(t, logical.UpdateOperation, "revoke/"+resp.Secret.LeaseID)
+ resp2, err := b.HandleRequest(req2)
+ if err != nil {
+ t.Fatalf("err: %v %#v", err, resp2)
+ }
+ if resp2 != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Attempt renew
+ req3 := logical.TestRequest(t, logical.UpdateOperation, "renew/"+resp.Secret.LeaseID)
+ resp3, err := b.HandleRequest(req3)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+ if resp3.Data["error"] != "lease not found or lease is not renewable" {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ // Read a key with a LeaseID
+ req = logical.TestRequest(t, logical.ReadOperation, "secret/foo")
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Test the other route path
+ req2 = logical.TestRequest(t, logical.UpdateOperation, "revoke")
+ req2.Data["lease_id"] = resp.Secret.LeaseID
+ resp2, err = b.HandleRequest(req2)
+ if err != nil {
+ t.Fatalf("err: %v %#v", err, resp2)
+ }
+ if resp2 != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read a key with a LeaseID
+ req = logical.TestRequest(t, logical.ReadOperation, "secret/foo")
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Test the other route path
+ req2 = logical.TestRequest(t, logical.UpdateOperation, "leases/revoke")
+ req2.Data["lease_id"] = resp.Secret.LeaseID
+ resp2, err = b.HandleRequest(req2)
+ if err != nil {
+ t.Fatalf("err: %v %#v", err, resp2)
+ }
+ if resp2 != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestSystemBackend_revoke_invalidID(t *testing.T) {
+ b := testSystemBackend(t)
+
+ // Attempt revoke
+ req := logical.TestRequest(t, logical.UpdateOperation, "leases/revoke/foobarbaz")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ // Attempt revoke with other method
+ req = logical.TestRequest(t, logical.UpdateOperation, "leases/revoke")
+ req.Data["lease_id"] = "foobarbaz"
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_revoke_invalidID_origUrl(t *testing.T) {
+ b := testSystemBackend(t)
+
+ // Attempt revoke
+ req := logical.TestRequest(t, logical.UpdateOperation, "revoke/foobarbaz")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ // Attempt revoke with other method
+ req = logical.TestRequest(t, logical.UpdateOperation, "revoke")
+ req.Data["lease_id"] = "foobarbaz"
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_revokePrefix(t *testing.T) {
+ core, b, root := testCoreSystemBackend(t)
+
+ // Create a key with a lease
+ req := logical.TestRequest(t, logical.UpdateOperation, "secret/foo")
+ req.Data["foo"] = "bar"
+ req.Data["lease"] = "1h"
+ req.ClientToken = root
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read a key with a LeaseID
+ req = logical.TestRequest(t, logical.ReadOperation, "secret/foo")
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Attempt revoke
+ req2 := logical.TestRequest(t, logical.UpdateOperation, "leases/revoke-prefix/secret/")
+ resp2, err := b.HandleRequest(req2)
+ if err != nil {
+ t.Fatalf("err: %v %#v", err, resp2)
+ }
+ if resp2 != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Attempt renew
+ req3 := logical.TestRequest(t, logical.UpdateOperation, "leases/renew/"+resp.Secret.LeaseID)
+ resp3, err := b.HandleRequest(req3)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+ if resp3.Data["error"] != "lease not found or lease is not renewable" {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_revokePrefix_origUrl(t *testing.T) {
+ core, b, root := testCoreSystemBackend(t)
+
+ // Create a key with a lease
+ req := logical.TestRequest(t, logical.UpdateOperation, "secret/foo")
+ req.Data["foo"] = "bar"
+ req.Data["lease"] = "1h"
+ req.ClientToken = root
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read a key with a LeaseID
+ req = logical.TestRequest(t, logical.ReadOperation, "secret/foo")
+ req.ClientToken = root
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Attempt revoke
+ req2 := logical.TestRequest(t, logical.UpdateOperation, "revoke-prefix/secret/")
+ resp2, err := b.HandleRequest(req2)
+ if err != nil {
+ t.Fatalf("err: %v %#v", err, resp2)
+ }
+ if resp2 != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Attempt renew
+ req3 := logical.TestRequest(t, logical.UpdateOperation, "renew/"+resp.Secret.LeaseID)
+ resp3, err := b.HandleRequest(req3)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+ if resp3.Data["error"] != "lease not found or lease is not renewable" {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_revokePrefixAuth(t *testing.T) {
+ core, ts, _, _ := TestCoreWithTokenStore(t)
+ bc := &logical.BackendConfig{
+ Logger: core.logger,
+ System: logical.StaticSystemView{
+ DefaultLeaseTTLVal: time.Hour * 24,
+ MaxLeaseTTLVal: time.Hour * 24 * 32,
+ },
+ }
+ b, err := NewSystemBackend(core, bc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ exp := ts.expiration
+
+ te := &TokenEntry{
+ ID: "foo",
+ Path: "auth/github/login/bar",
+ }
+ err = ts.create(te)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ te, err = ts.Lookup("foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if te == nil {
+ t.Fatal("token entry was nil")
+ }
+
+ // Create a new token
+ auth := &logical.Auth{
+ ClientToken: te.ID,
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ },
+ }
+ err = exp.RegisterAuth(te.Path, auth)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "leases/revoke-prefix/auth/github/")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ te, err = ts.Lookup(te.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if te != nil {
+ t.Fatalf("bad: %v", te)
+ }
+}
+
+func TestSystemBackend_revokePrefixAuth_origUrl(t *testing.T) {
+ core, ts, _, _ := TestCoreWithTokenStore(t)
+ bc := &logical.BackendConfig{
+ Logger: core.logger,
+ System: logical.StaticSystemView{
+ DefaultLeaseTTLVal: time.Hour * 24,
+ MaxLeaseTTLVal: time.Hour * 24 * 32,
+ },
+ }
+ b, err := NewSystemBackend(core, bc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ exp := ts.expiration
+
+ te := &TokenEntry{
+ ID: "foo",
+ Path: "auth/github/login/bar",
+ }
+ err = ts.create(te)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ te, err = ts.Lookup("foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if te == nil {
+ t.Fatal("token entry was nil")
+ }
+
+ // Create a new token
+ auth := &logical.Auth{
+ ClientToken: te.ID,
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ },
+ }
+ err = exp.RegisterAuth(te.Path, auth)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "revoke-prefix/auth/github/")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ te, err = ts.Lookup(te.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if te != nil {
+ t.Fatalf("bad: %v", te)
+ }
+}
+
+func TestSystemBackend_authTable(t *testing.T) {
+ b := testSystemBackend(t)
+ req := logical.TestRequest(t, logical.ReadOperation, "auth")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ exp := map[string]interface{}{
+ "token/": map[string]interface{}{
+ "type": "token",
+ "description": "token based credentials",
+ "config": map[string]interface{}{
+ "default_lease_ttl": int64(0),
+ "max_lease_ttl": int64(0),
+ },
+ "local": false,
+ },
+ }
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("got: %#v expect: %#v", resp.Data, exp)
+ }
+}
+
+func TestSystemBackend_enableAuth(t *testing.T) {
+ c, b, _ := testCoreSystemBackend(t)
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return &NoopBackend{}, nil
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "auth/foo")
+ req.Data["type"] = "noop"
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_enableAuth_invalid(t *testing.T) {
+ b := testSystemBackend(t)
+ req := logical.TestRequest(t, logical.UpdateOperation, "auth/foo")
+ req.Data["type"] = "nope"
+ resp, err := b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["error"] != "unknown backend type: nope" {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_disableAuth(t *testing.T) {
+ c, b, _ := testCoreSystemBackend(t)
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return &NoopBackend{}, nil
+ }
+
+ // Register the backend
+ req := logical.TestRequest(t, logical.UpdateOperation, "auth/foo")
+ req.Data["type"] = "noop"
+ b.HandleRequest(req)
+
+ // Deregister it
+ req = logical.TestRequest(t, logical.DeleteOperation, "auth/foo")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_policyList(t *testing.T) {
+ b := testSystemBackend(t)
+ req := logical.TestRequest(t, logical.ReadOperation, "policy")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ exp := map[string]interface{}{
+ "keys": []string{"default", "root"},
+ "policies": []string{"default", "root"},
+ }
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("got: %#v expect: %#v", resp.Data, exp)
+ }
+}
+
+func TestSystemBackend_policyCRUD(t *testing.T) {
+ b := testSystemBackend(t)
+
+ // Create the policy
+ rules := `path "foo/" { policy = "read" }`
+ req := logical.TestRequest(t, logical.UpdateOperation, "policy/Foo")
+ req.Data["rules"] = rules
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %#v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read the policy
+ req = logical.TestRequest(t, logical.ReadOperation, "policy/foo")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ exp := map[string]interface{}{
+ "name": "foo",
+ "rules": rules,
+ }
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("got: %#v expect: %#v", resp.Data, exp)
+ }
+
+ // Read, and make sure that case has been normalized
+ req = logical.TestRequest(t, logical.ReadOperation, "policy/Foo")
+ resp, err = b.HandleRequest(req)
+ if resp != nil {
+ t.Fatalf("err: expected nil response, got %#v", *resp)
+ }
+
+ // List the policies
+ req = logical.TestRequest(t, logical.ReadOperation, "policy")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ exp = map[string]interface{}{
+ "keys": []string{"default", "foo", "root"},
+ "policies": []string{"default", "foo", "root"},
+ }
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("got: %#v expect: %#v", resp.Data, exp)
+ }
+
+ // Delete the policy
+ req = logical.TestRequest(t, logical.DeleteOperation, "policy/foo")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read the policy (deleted)
+ req = logical.TestRequest(t, logical.ReadOperation, "policy/foo")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // List the policies (deleted)
+ req = logical.TestRequest(t, logical.ReadOperation, "policy")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ exp = map[string]interface{}{
+ "keys": []string{"default", "root"},
+ "policies": []string{"default", "root"},
+ }
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("got: %#v expect: %#v", resp.Data, exp)
+ }
+}
+
+func TestSystemBackend_enableAudit(t *testing.T) {
+ c, b, _ := testCoreSystemBackend(t)
+ c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ return &NoopAudit{
+ Config: config,
+ }, nil
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "audit/foo")
+ req.Data["type"] = "noop"
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_auditHash(t *testing.T) {
+ c, b, _ := testCoreSystemBackend(t)
+ c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ view := &logical.InmemStorage{}
+ view.Put(&logical.StorageEntry{
+ Key: "salt",
+ Value: []byte("foo"),
+ })
+ var err error
+ config.Salt, err = salt.NewSalt(view, &salt.Config{
+ HMAC: sha256.New,
+ HMACType: "hmac-sha256",
+ })
+ if err != nil {
+ t.Fatalf("error getting new salt: %v", err)
+ }
+ return &NoopAudit{
+ Config: config,
+ }, nil
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "audit/foo")
+ req.Data["type"] = "noop"
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.UpdateOperation, "audit-hash/foo")
+ req.Data["input"] = "bar"
+
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil || resp.Data == nil {
+ t.Fatalf("response or its data was nil")
+ }
+ hash, ok := resp.Data["hash"]
+ if !ok {
+ t.Fatalf("did not get hash back in response, response was %#v", resp.Data)
+ }
+ if hash.(string) != "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317" {
+ t.Fatalf("bad hash back: %s", hash.(string))
+ }
+}
+
+func TestSystemBackend_enableAudit_invalid(t *testing.T) {
+ b := testSystemBackend(t)
+ req := logical.TestRequest(t, logical.UpdateOperation, "audit/foo")
+ req.Data["type"] = "nope"
+ resp, err := b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Data["error"] != "unknown backend type: nope" {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_auditTable(t *testing.T) {
+ c, b, _ := testCoreSystemBackend(t)
+ c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ return &NoopAudit{
+ Config: config,
+ }, nil
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "audit/foo")
+ req.Data["type"] = "noop"
+ req.Data["description"] = "testing"
+ req.Data["options"] = map[string]interface{}{
+ "foo": "bar",
+ }
+ req.Data["local"] = true
+ if _, err := b.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "audit")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ exp := map[string]interface{}{
+ "foo/": map[string]interface{}{
+ "path": "foo/",
+ "type": "noop",
+ "description": "testing",
+ "options": map[string]string{
+ "foo": "bar",
+ },
+ "local": true,
+ },
+ }
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("got: %#v expect: %#v", resp.Data, exp)
+ }
+}
+
+func TestSystemBackend_disableAudit(t *testing.T) {
+ c, b, _ := testCoreSystemBackend(t)
+ c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ return &NoopAudit{
+ Config: config,
+ }, nil
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "audit/foo")
+ req.Data["type"] = "noop"
+ req.Data["description"] = "testing"
+ req.Data["options"] = map[string]interface{}{
+ "foo": "bar",
+ }
+ if _, err := b.HandleRequest(req); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Deregister it
+ req = logical.TestRequest(t, logical.DeleteOperation, "audit/foo")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+}
+
+func TestSystemBackend_rawRead_Protected(t *testing.T) {
+ b := testSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.ReadOperation, "raw/"+keyringPath)
+ _, err := b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestSystemBackend_rawWrite_Protected(t *testing.T) {
+ b := testSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "raw/"+keyringPath)
+ _, err := b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestSystemBackend_rawReadWrite(t *testing.T) {
+ c, b, _ := testCoreSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "raw/sys/policy/test")
+ req.Data["value"] = `path "secret/" { policy = "read" }`
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ // Read via raw API
+ req = logical.TestRequest(t, logical.ReadOperation, "raw/sys/policy/test")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !strings.HasPrefix(resp.Data["value"].(string), "path") {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ // Read the policy!
+ p, err := c.policyStore.GetPolicy("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if p == nil || len(p.Paths) == 0 {
+ t.Fatalf("missing policy %#v", p)
+ }
+ if p.Paths[0].Prefix != "secret/" || p.Paths[0].Policy != ReadCapability {
+ t.Fatalf("Bad: %#v", p)
+ }
+}
+
+func TestSystemBackend_rawDelete_Protected(t *testing.T) {
+ b := testSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.DeleteOperation, "raw/"+keyringPath)
+ _, err := b.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestSystemBackend_rawDelete(t *testing.T) {
+ c, b, _ := testCoreSystemBackend(t)
+
+ // Set the policy
+ p := &Policy{Name: "test"}
+ err := c.policyStore.SetPolicy(p)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Delete the policy
+ req := logical.TestRequest(t, logical.DeleteOperation, "raw/sys/policy/test")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ // Policy should be gone
+ c.policyStore.lru.Purge()
+ out, err := c.policyStore.GetPolicy("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("policy should be gone")
+ }
+}
+
+func TestSystemBackend_keyStatus(t *testing.T) {
+ b := testSystemBackend(t)
+ req := logical.TestRequest(t, logical.ReadOperation, "key-status")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ exp := map[string]interface{}{
+ "term": 1,
+ }
+ delete(resp.Data, "install_time")
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("got: %#v expect: %#v", resp.Data, exp)
+ }
+}
+
+func TestSystemBackend_rotate(t *testing.T) {
+ b := testSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "rotate")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "key-status")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ exp := map[string]interface{}{
+ "term": 2,
+ }
+ delete(resp.Data, "install_time")
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("got: %#v expect: %#v", resp.Data, exp)
+ }
+}
+
+func testSystemBackend(t *testing.T) logical.Backend {
+ c, _, _ := TestCoreUnsealed(t)
+ bc := &logical.BackendConfig{
+ Logger: c.logger,
+ System: logical.StaticSystemView{
+ DefaultLeaseTTLVal: time.Hour * 24,
+ MaxLeaseTTLVal: time.Hour * 24 * 32,
+ },
+ }
+
+ b, err := NewSystemBackend(c, bc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return b
+}
+
+func testCoreSystemBackend(t *testing.T) (*Core, logical.Backend, string) {
+ c, _, root := TestCoreUnsealed(t)
+ bc := &logical.BackendConfig{
+ Logger: c.logger,
+ System: logical.StaticSystemView{
+ DefaultLeaseTTLVal: time.Hour * 24,
+ MaxLeaseTTLVal: time.Hour * 24 * 32,
+ },
+ }
+
+ b, err := NewSystemBackend(c, bc)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return c, b, root
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/mount.go b/vendor/github.com/hashicorp/vault/vault/mount.go
new file mode 100644
index 0000000..d428eee
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/mount.go
@@ -0,0 +1,791 @@
+package vault
+
+import (
+ "crypto/sha1"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+const (
+ // coreMountConfigPath is used to store the mount configuration.
+ // Mounts are protected within the Vault itself, which means they
+ // can only be viewed or modified after an unseal.
+ coreMountConfigPath = "core/mounts"
+
+ // coreLocalMountConfigPath is used to store mount configuration for local
+ // (non-replicated) mounts
+ coreLocalMountConfigPath = "core/local-mounts"
+
+ // backendBarrierPrefix is the prefix to the UUID used in the
+ // barrier view for the backends.
+ backendBarrierPrefix = "logical/"
+
+ // systemBarrierPrefix is the prefix used for the
+ // system logical backend.
+ systemBarrierPrefix = "sys/"
+
+ // mountTableType is the value we expect to find for the mount table and
+ // corresponding entries
+ mountTableType = "mounts"
+)
+
+var (
+ // errLoadMountsFailed is returned if loadMounts encounters an error
+ errLoadMountsFailed = errors.New("failed to setup mount table")
+
+ // protectedMounts cannot be remounted
+ protectedMounts = []string{
+ "audit/",
+ "auth/",
+ "sys/",
+ "cubbyhole/",
+ }
+
+ untunableMounts = []string{
+ "cubbyhole/",
+ "sys/",
+ "audit/",
+ }
+
+ // singletonMounts can only exist in one location and are
+ // loaded by default. These are types, not paths.
+ singletonMounts = []string{
+ "cubbyhole",
+ "system",
+ }
+)
+
+// MountTable is used to represent the internal mount table
+type MountTable struct {
+ Type string `json:"type"`
+ Entries []*MountEntry `json:"entries"`
+}
+
+// shallowClone returns a copy of the mount table that
+// keeps the MountEntry locations, so as not to invalidate
+// other locations holding pointers. Care needs to be taken
+// if modifying entries rather than modifying the table itself
+func (t *MountTable) shallowClone() *MountTable {
+ mt := &MountTable{
+ Type: t.Type,
+ Entries: make([]*MountEntry, len(t.Entries)),
+ }
+ for i, e := range t.Entries {
+ mt.Entries[i] = e
+ }
+ return mt
+}
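+
+// A minimal sketch of the aliasing this implies (newEntry is hypothetical):
+// appending to the clone leaves the original slice untouched, but mutating
+// a shared entry is visible through both tables:
+//
+//	clone := table.shallowClone()
+//	clone.Entries = append(clone.Entries, newEntry) // original unchanged
+//	clone.Entries[0].Tainted = true                 // also seen via table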
+
+// Hash is used to generate a hash value for the mount table
+func (t *MountTable) Hash() ([]byte, error) {
+ buf, err := json.Marshal(t)
+ if err != nil {
+ return nil, err
+ }
+ hash := sha1.Sum(buf)
+ return hash[:], nil
+}
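+
+// A usage sketch (t1 and t2 are assumed *MountTable values) of how a caller
+// might detect mount table drift:
+//
+//	h1, _ := t1.Hash()
+//	h2, _ := t2.Hash()
+//	changed := !bytes.Equal(h1, h2) // requires the "bytes" package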
+
+// setTaint is used to set the taint on given entry
+func (t *MountTable) setTaint(path string, value bool) *MountEntry {
+ n := len(t.Entries)
+ for i := 0; i < n; i++ {
+ if t.Entries[i].Path == path {
+ t.Entries[i].Tainted = value
+ return t.Entries[i]
+ }
+ }
+ return nil
+}
+
+// remove is used to remove a given path entry; returns the entry that was
+// removed
+func (t *MountTable) remove(path string) *MountEntry {
+ n := len(t.Entries)
+ for i := 0; i < n; i++ {
+ if entry := t.Entries[i]; entry.Path == path {
+ t.Entries[i], t.Entries[n-1] = t.Entries[n-1], nil
+ t.Entries = t.Entries[:n-1]
+ return entry
+ }
+ }
+ return nil
+}
+
+// sortEntriesByPath sorts the entries in the table by path and returns the
+// table; this is useful for tests
+func (t *MountTable) sortEntriesByPath() *MountTable {
+ sort.Slice(t.Entries, func(i, j int) bool {
+ return t.Entries[i].Path < t.Entries[j].Path
+ })
+ return t
+}
+
+// MountEntry is used to represent a mount table entry
+type MountEntry struct {
+ Table string `json:"table"` // The table it belongs to
+ Path string `json:"path"` // Mount Path
+ Type string `json:"type"` // Logical backend Type
+ Description string `json:"description"` // User-provided description
+ UUID string `json:"uuid"` // Barrier view UUID
+ Config MountConfig `json:"config"` // Configuration related to this mount (but not backend-derived)
+ Options map[string]string `json:"options"` // Backend options
+ Local bool `json:"local"` // Local mounts are not replicated or affected by replication
+ Tainted bool `json:"tainted,omitempty"` // Set as a Write-Ahead flag for unmount/remount
+}
+
+// MountConfig is used to hold settable options
+type MountConfig struct {
+ DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` // Override for global default
+ MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` // Override for global default
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` // Override for global default
+}
+
+// Clone returns a deep copy of the mount entry
+func (e *MountEntry) Clone() *MountEntry {
+ optClone := make(map[string]string)
+ for k, v := range e.Options {
+ optClone[k] = v
+ }
+ return &MountEntry{
+ Table: e.Table,
+ Path: e.Path,
+ Type: e.Type,
+ Description: e.Description,
+ UUID: e.UUID,
+ Config: e.Config,
+ Options: optClone,
+ Local: e.Local,
+ Tainted: e.Tainted,
+ }
+}
+
+// Mount is used to mount a new backend to the mount table.
+func (c *Core) mount(entry *MountEntry) error {
+ // Ensure we end the path in a slash
+ if !strings.HasSuffix(entry.Path, "/") {
+ entry.Path += "/"
+ }
+
+ // Prevent protected paths from being mounted
+ for _, p := range protectedMounts {
+ if strings.HasPrefix(entry.Path, p) {
+ return logical.CodedError(403, fmt.Sprintf("cannot mount '%s'", entry.Path))
+ }
+ }
+
+ // Do not allow more than one instance of a singleton mount
+ for _, p := range singletonMounts {
+ if entry.Type == p {
+ return logical.CodedError(403, fmt.Sprintf("Cannot mount more than one instance of '%s'", entry.Type))
+ }
+ }
+
+ c.mountsLock.Lock()
+ defer c.mountsLock.Unlock()
+
+ // Verify there is no conflicting mount
+ if match := c.router.MatchingMount(entry.Path); match != "" {
+ return logical.CodedError(409, fmt.Sprintf("existing mount at %s", match))
+ }
+
+ // Generate a new UUID and view
+ if entry.UUID == "" {
+ entryUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ return err
+ }
+ entry.UUID = entryUUID
+ }
+ viewPath := backendBarrierPrefix + entry.UUID + "/"
+ view := NewBarrierView(c.barrier, viewPath)
+ sysView := c.mountEntrySysView(entry)
+
+ backend, err := c.newLogicalBackend(entry.Type, sysView, view, nil)
+ if err != nil {
+ return err
+ }
+ if backend == nil {
+ return fmt.Errorf("nil backend of type %q returned from creation function", entry.Type)
+ }
+
+ // Call initialize; this takes care of init tasks that must be run after
+ // the ignore paths are collected
+ if err := backend.Initialize(); err != nil {
+ return err
+ }
+
+ newTable := c.mounts.shallowClone()
+ newTable.Entries = append(newTable.Entries, entry)
+ if err := c.persistMounts(newTable, entry.Local); err != nil {
+ c.logger.Error("core: failed to update mount table", "error", err)
+ return logical.CodedError(500, "failed to update mount table")
+ }
+ c.mounts = newTable
+
+ if err := c.router.Mount(backend, entry.Path, entry, view); err != nil {
+ return err
+ }
+
+ if c.logger.IsInfo() {
+ c.logger.Info("core: successful mount", "path", entry.Path, "type", entry.Type)
+ }
+ return nil
+}
+
+// Unmount is used to unmount a path. The boolean indicates whether the mount
+// was found.
+func (c *Core) unmount(path string) (bool, error) {
+ // Ensure we end the path in a slash
+ if !strings.HasSuffix(path, "/") {
+ path += "/"
+ }
+
+ // Prevent protected paths from being unmounted
+ for _, p := range protectedMounts {
+ if strings.HasPrefix(path, p) {
+ return true, fmt.Errorf("cannot unmount '%s'", path)
+ }
+ }
+
+ // Verify exact match of the route
+ match := c.router.MatchingMount(path)
+ if match == "" || path != match {
+ return false, fmt.Errorf("no matching mount")
+ }
+
+ // Get the view for this backend
+ view := c.router.MatchingStorageView(path)
+
+ // Mark the entry as tainted
+ if err := c.taintMountEntry(path); err != nil {
+ return true, err
+ }
+
+ // Taint the router path to prevent new requests from being routed;
+ // note that in-flight requests may still complete
+ if err := c.router.Taint(path); err != nil {
+ return true, err
+ }
+
+ // Invoke the rollback manager a final time
+ if err := c.rollback.Rollback(path); err != nil {
+ return true, err
+ }
+
+ // Revoke all the dynamic keys
+ if err := c.expiration.RevokePrefix(path); err != nil {
+ return true, err
+ }
+
+ // Call cleanup function if it exists
+ backend := c.router.MatchingBackend(path)
+ if backend != nil {
+ backend.Cleanup()
+ }
+
+ // Unmount the backend entirely
+ if err := c.router.Unmount(path); err != nil {
+ return true, err
+ }
+
+ // Clear the data in the view
+ if err := logical.ClearView(view); err != nil {
+ return true, err
+ }
+
+ // Remove the mount table entry
+ if err := c.removeMountEntry(path); err != nil {
+ return true, err
+ }
+ if c.logger.IsInfo() {
+ c.logger.Info("core: successfully unmounted", "path", path)
+ }
+ return true, nil
+}
+
+// removeMountEntry is used to remove an entry from the mount table
+func (c *Core) removeMountEntry(path string) error {
+ c.mountsLock.Lock()
+ defer c.mountsLock.Unlock()
+
+ // Remove the entry from the mount table
+ newTable := c.mounts.shallowClone()
+ entry := newTable.remove(path)
+ if entry == nil {
+ c.logger.Error("core: nil entry found removing entry in mounts table", "path", path)
+ return logical.CodedError(500, "failed to remove entry in mounts table")
+ }
+
+ // When unmounting all entries, the JSON code will load back up from storage
+ // as a nil slice, which breaks tests, so just set it to nil explicitly
+ if len(newTable.Entries) == 0 {
+ newTable.Entries = nil
+ }
+
+ // Update the mount table
+ if err := c.persistMounts(newTable, entry.Local); err != nil {
+ c.logger.Error("core: failed to remove entry from mounts table", "error", err)
+ return logical.CodedError(500, "failed to remove entry from mounts table")
+ }
+
+ c.mounts = newTable
+ return nil
+}
+
+// taintMountEntry is used to mark an entry in the mount table as tainted
+func (c *Core) taintMountEntry(path string) error {
+ c.mountsLock.Lock()
+ defer c.mountsLock.Unlock()
+
+ // As modifying the taint of an entry affects shallow clones,
+ // we simply use the original
+ entry := c.mounts.setTaint(path, true)
+ if entry == nil {
+ c.logger.Error("core: nil entry found tainting entry in mounts table", "path", path)
+ return logical.CodedError(500, "failed to taint entry in mounts table")
+ }
+
+ // Update the mount table
+ if err := c.persistMounts(c.mounts, entry.Local); err != nil {
+ c.logger.Error("core: failed to taint entry in mounts table", "error", err)
+ return logical.CodedError(500, "failed to taint entry in mounts table")
+ }
+
+ return nil
+}
+
+// Remount is used to remount a path at a new mount point.
+func (c *Core) remount(src, dst string) error {
+ // Ensure we end the path in a slash
+ if !strings.HasSuffix(src, "/") {
+ src += "/"
+ }
+ if !strings.HasSuffix(dst, "/") {
+ dst += "/"
+ }
+
+ // Prevent protected paths from being remounted
+ for _, p := range protectedMounts {
+ if strings.HasPrefix(src, p) {
+ return fmt.Errorf("cannot remount '%s'", src)
+ }
+ }
+
+ // Verify exact match of the route
+ match := c.router.MatchingMount(src)
+ if match == "" || src != match {
+ return fmt.Errorf("no matching mount at '%s'", src)
+ }
+
+ if match := c.router.MatchingMount(dst); match != "" {
+ return fmt.Errorf("existing mount at '%s'", match)
+ }
+
+ // Mark the entry as tainted
+ if err := c.taintMountEntry(src); err != nil {
+ return err
+ }
+
+ // Taint the router path to prevent routing
+ if err := c.router.Taint(src); err != nil {
+ return err
+ }
+
+ // Invoke the rollback manager a final time
+ if err := c.rollback.Rollback(src); err != nil {
+ return err
+ }
+
+ // Revoke all the dynamic keys
+ if err := c.expiration.RevokePrefix(src); err != nil {
+ return err
+ }
+
+ c.mountsLock.Lock()
+ var ent *MountEntry
+ for _, ent = range c.mounts.Entries {
+ if ent.Path == src {
+ ent.Path = dst
+ ent.Tainted = false
+ break
+ }
+ }
+
+ if ent == nil {
+ c.logger.Error("core: failed to find entry in mounts table")
+ return logical.CodedError(500, "failed to find entry in mounts table")
+ }
+
+ // Update the mount table
+ if err := c.persistMounts(c.mounts, ent.Local); err != nil {
+ ent.Path = src
+ ent.Tainted = true
+ c.mountsLock.Unlock()
+ c.logger.Error("core: failed to update mounts table", "error", err)
+ return logical.CodedError(500, "failed to update mounts table")
+ }
+ c.mountsLock.Unlock()
+
+ // Remount the backend
+ if err := c.router.Remount(src, dst); err != nil {
+ return err
+ }
+
+ // Un-taint the path
+ if err := c.router.Untaint(dst); err != nil {
+ return err
+ }
+
+ if c.logger.IsInfo() {
+ c.logger.Info("core: successful remount", "old_path", src, "new_path", dst)
+ }
+ return nil
+}
+
+// loadMounts is invoked as part of postUnseal to load the mount table
+func (c *Core) loadMounts() error {
+ mountTable := &MountTable{}
+ localMountTable := &MountTable{}
+ // Load the existing mount table
+ raw, err := c.barrier.Get(coreMountConfigPath)
+ if err != nil {
+ c.logger.Error("core: failed to read mount table", "error", err)
+ return errLoadMountsFailed
+ }
+ rawLocal, err := c.barrier.Get(coreLocalMountConfigPath)
+ if err != nil {
+ c.logger.Error("core: failed to read local mount table", "error", err)
+ return errLoadMountsFailed
+ }
+
+ c.mountsLock.Lock()
+ defer c.mountsLock.Unlock()
+
+ if raw != nil {
+ // Check whether the persisted value starts with the compression
+ // canary. If so, decompress the table and then JSON decode it;
+ // otherwise, simply JSON decode it.
+ if err := jsonutil.DecodeJSON(raw.Value, mountTable); err != nil {
+ c.logger.Error("core: failed to decompress and/or decode the mount table", "error", err)
+ return err
+ }
+ c.mounts = mountTable
+ }
+ if rawLocal != nil {
+ if err := jsonutil.DecodeJSON(rawLocal.Value, localMountTable); err != nil {
+ c.logger.Error("core: failed to decompress and/or decode the local mount table", "error", err)
+ return err
+ }
+ c.mounts.Entries = append(c.mounts.Entries, localMountTable.Entries...)
+ }
+
+ // Ensure that required entries are loaded, or newly added
+ // required entries may never get loaded at all. Note that this
+ // is only designed to work with singletons, as it checks
+ // by type only.
+ if c.mounts != nil {
+ needPersist := false
+
+ // Upgrade to typed mount table
+ if c.mounts.Type == "" {
+ c.mounts.Type = mountTableType
+ needPersist = true
+ }
+
+ for _, requiredMount := range requiredMountTable().Entries {
+ foundRequired := false
+ for _, coreMount := range c.mounts.Entries {
+ if coreMount.Type == requiredMount.Type {
+ foundRequired = true
+ break
+ }
+ }
+ if !foundRequired {
+ c.mounts.Entries = append(c.mounts.Entries, requiredMount)
+ needPersist = true
+ }
+ }
+
+ // Upgrade to table-scoped entries
+ for _, entry := range c.mounts.Entries {
+ if entry.Type == "cubbyhole" && !entry.Local {
+ entry.Local = true
+ needPersist = true
+ }
+ if entry.Table == "" {
+ entry.Table = c.mounts.Type
+ needPersist = true
+ }
+ }
+
+ // Done if we have restored the mount table and we don't need
+ // to persist
+ if !needPersist {
+ return nil
+ }
+ } else {
+ // Create and persist the default mount table
+ c.mounts = defaultMountTable()
+ }
+
+ if err := c.persistMounts(c.mounts, false); err != nil {
+ c.logger.Error("core: failed to persist mount table", "error", err)
+ return errLoadMountsFailed
+ }
+ return nil
+}
+
+// persistMounts is used to persist the mount table after modification
+func (c *Core) persistMounts(table *MountTable, localOnly bool) error {
+ if table.Type != mountTableType {
+ c.logger.Error("core: given table to persist has wrong type", "actual_type", table.Type, "expected_type", mountTableType)
+ return fmt.Errorf("invalid table type given, not persisting")
+ }
+
+ for _, entry := range table.Entries {
+ if entry.Table != table.Type {
+ c.logger.Error("core: given entry to persist in mount table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type)
+ return fmt.Errorf("invalid mount entry found, not persisting")
+ }
+ }
+
+ nonLocalMounts := &MountTable{
+ Type: mountTableType,
+ }
+
+ localMounts := &MountTable{
+ Type: mountTableType,
+ }
+
+ for _, entry := range table.Entries {
+ if entry.Local {
+ localMounts.Entries = append(localMounts.Entries, entry)
+ } else {
+ nonLocalMounts.Entries = append(nonLocalMounts.Entries, entry)
+ }
+ }
+
+ if !localOnly {
+ // Encode the mount table into JSON and compress it (lzw).
+ compressedBytes, err := jsonutil.EncodeJSONAndCompress(nonLocalMounts, nil)
+ if err != nil {
+ c.logger.Error("core: failed to encode and/or compress the mount table", "error", err)
+ return err
+ }
+
+ // Create an entry
+ entry := &Entry{
+ Key: coreMountConfigPath,
+ Value: compressedBytes,
+ }
+
+ // Write to the physical backend
+ if err := c.barrier.Put(entry); err != nil {
+ c.logger.Error("core: failed to persist mount table", "error", err)
+ return err
+ }
+ }
+
+ // Repeat with local mounts
+ compressedBytes, err := jsonutil.EncodeJSONAndCompress(localMounts, nil)
+ if err != nil {
+ c.logger.Error("core: failed to encode and/or compress the local mount table", "error", err)
+ return err
+ }
+
+ entry := &Entry{
+ Key: coreLocalMountConfigPath,
+ Value: compressedBytes,
+ }
+
+ if err := c.barrier.Put(entry); err != nil {
+ c.logger.Error("core: failed to persist local mount table", "error", err)
+ return err
+ }
+
+ return nil
+}
+
+// setupMounts is invoked after we've loaded the mount table to
+// initialize the logical backends and set up the router
+func (c *Core) setupMounts() error {
+ c.mountsLock.Lock()
+ defer c.mountsLock.Unlock()
+
+ var backend logical.Backend
+ var view *BarrierView
+ var err error
+
+ for _, entry := range c.mounts.Entries {
+ // Initialize the backend, special casing for system
+ barrierPath := backendBarrierPrefix + entry.UUID + "/"
+ if entry.Type == "system" {
+ barrierPath = systemBarrierPrefix
+ }
+
+ // Create a barrier view using the UUID
+ view = NewBarrierView(c.barrier, barrierPath)
+ sysView := c.mountEntrySysView(entry)
+ // Create the new backend
+ backend, err = c.newLogicalBackend(entry.Type, sysView, view, nil)
+ if err != nil {
+ c.logger.Error("core: failed to create mount entry", "path", entry.Path, "error", err)
+ return errLoadMountsFailed
+ }
+ if backend == nil {
+ return fmt.Errorf("created mount entry of type %q is nil", entry.Type)
+ }
+
+ if err := backend.Initialize(); err != nil {
+ return err
+ }
+
+ switch entry.Type {
+ case "system":
+ c.systemBarrierView = view
+ case "cubbyhole":
+ ch := backend.(*CubbyholeBackend)
+ ch.saltUUID = entry.UUID
+ ch.storageView = view
+ }
+
+ // Mount the backend
+ err = c.router.Mount(backend, entry.Path, entry, view)
+ if err != nil {
+ c.logger.Error("core: failed to mount entry", "path", entry.Path, "error", err)
+ return errLoadMountsFailed
+ }
+ if c.logger.IsInfo() {
+ c.logger.Info("core: successfully mounted backend", "type", entry.Type, "path", entry.Path)
+ }
+
+ // Ensure the path is tainted if set in the mount table
+ if entry.Tainted {
+ c.router.Taint(entry.Path)
+ }
+ }
+ return nil
+}
+
+// unloadMounts is used before we seal the vault to reset the mounts to
+// their unloaded state, calling Cleanup if defined. This is reversed by loadMounts and setupMounts.
+func (c *Core) unloadMounts() error {
+ c.mountsLock.Lock()
+ defer c.mountsLock.Unlock()
+
+ if c.mounts != nil {
+ mountTable := c.mounts.shallowClone()
+ for _, e := range mountTable.Entries {
+ backend := c.router.MatchingBackend(e.Path)
+ if backend != nil {
+ backend.Cleanup()
+ }
+ }
+ }
+
+ c.mounts = nil
+ c.router = NewRouter()
+ c.systemBarrierView = nil
+ return nil
+}
+
+// newLogicalBackend is used to create and configure a new logical backend by name
+func (c *Core) newLogicalBackend(t string, sysView logical.SystemView, view logical.Storage, conf map[string]string) (logical.Backend, error) {
+ f, ok := c.logicalBackends[t]
+ if !ok {
+ return nil, fmt.Errorf("unknown backend type: %s", t)
+ }
+
+ config := &logical.BackendConfig{
+ StorageView: view,
+ Logger: c.logger,
+ Config: conf,
+ System: sysView,
+ }
+
+ b, err := f(config)
+ if err != nil {
+ return nil, err
+ }
+ if b == nil {
+ return nil, fmt.Errorf("nil backend of type %q returned from factory", t)
+ }
+ return b, nil
+}
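+
+// A sketch of how a factory is registered and consumed (the "noop" backend
+// mirrors the test helpers; sysView and view are assumed to exist):
+//
+//	c.logicalBackends["noop"] = func(conf *logical.BackendConfig) (logical.Backend, error) {
+//		return &NoopBackend{}, nil
+//	}
+//	b, err := c.newLogicalBackend("noop", sysView, view, nil)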
+
+// mountEntrySysView creates a logical.SystemView from global and
+// mount-specific entries; because this should be called when setting
+// up a mount entry, it doesn't check to ensure that entry is not nil
+func (c *Core) mountEntrySysView(entry *MountEntry) logical.SystemView {
+ return dynamicSystemView{
+ core: c,
+ mountEntry: entry,
+ }
+}
+
+// defaultMountTable creates a default mount table
+func defaultMountTable() *MountTable {
+ table := &MountTable{
+ Type: mountTableType,
+ }
+ mountUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ panic(fmt.Sprintf("could not create default mount table UUID: %v", err))
+ }
+ genericMount := &MountEntry{
+ Table: mountTableType,
+ Path: "secret/",
+ Type: "generic",
+ Description: "generic secret storage",
+ UUID: mountUUID,
+ }
+ table.Entries = append(table.Entries, genericMount)
+ table.Entries = append(table.Entries, requiredMountTable().Entries...)
+ return table
+}
+
+// requiredMountTable creates a mount table with entries required
+// to be available
+func requiredMountTable() *MountTable {
+ table := &MountTable{
+ Type: mountTableType,
+ }
+ cubbyholeUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ panic(fmt.Sprintf("could not create cubbyhole UUID: %v", err))
+ }
+ cubbyholeMount := &MountEntry{
+ Table: mountTableType,
+ Path: "cubbyhole/",
+ Type: "cubbyhole",
+ Description: "per-token private secret storage",
+ UUID: cubbyholeUUID,
+ Local: true,
+ }
+
+ sysUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ panic(fmt.Sprintf("could not create sys UUID: %v", err))
+ }
+ sysMount := &MountEntry{
+ Table: mountTableType,
+ Path: "sys/",
+ Type: "system",
+ Description: "system endpoints used for control, policy and debugging",
+ UUID: sysUUID,
+ }
+ table.Entries = append(table.Entries, cubbyholeMount)
+ table.Entries = append(table.Entries, sysMount)
+ return table
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/mount_test.go b/vendor/github.com/hashicorp/vault/vault/mount_test.go
new file mode 100644
index 0000000..4e8d25f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/mount_test.go
@@ -0,0 +1,630 @@
+package vault
+
+import (
+ "encoding/json"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/compressutil"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestCore_DefaultMountTable(t *testing.T) {
+ c, keys, _ := TestCoreUnsealed(t)
+ verifyDefaultTable(t, c.mounts)
+
+ // Start a second core with same physical
+ conf := &CoreConfig{
+ Physical: c.physical,
+ DisableMlock: true,
+ }
+ c2, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for i, key := range keys {
+ unseal, err := TestCoreUnseal(c2, key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if i+1 == len(keys) && !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ }
+
+ // Verify matching mount tables
+ if !reflect.DeepEqual(c.mounts.sortEntriesByPath(), c2.mounts.sortEntriesByPath()) {
+ t.Fatalf("mismatch: %v %v", c.mounts, c2.mounts)
+ }
+}
+
+func TestCore_Mount(t *testing.T) {
+ c, keys, _ := TestCoreUnsealed(t)
+ me := &MountEntry{
+ Table: mountTableType,
+ Path: "foo",
+ Type: "generic",
+ }
+ err := c.mount(me)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ match := c.router.MatchingMount("foo/bar")
+ if match != "foo/" {
+ t.Fatalf("missing mount")
+ }
+
+ conf := &CoreConfig{
+ Physical: c.physical,
+ DisableMlock: true,
+ }
+ c2, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for i, key := range keys {
+ unseal, err := TestCoreUnseal(c2, key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if i+1 == len(keys) && !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ }
+
+ // Verify matching mount tables
+ if !reflect.DeepEqual(c.mounts.sortEntriesByPath(), c2.mounts.sortEntriesByPath()) {
+ t.Fatalf("mismatch: %v %v", c.mounts, c2.mounts)
+ }
+}
+
+// Test that the local table actually gets populated as expected with local
+// entries, and that upon reading, the entries from both tables are
+// recombined correctly
+func TestCore_Mount_Local(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+
+ c.mounts = &MountTable{
+ Type: mountTableType,
+ Entries: []*MountEntry{
+ &MountEntry{
+ Table: mountTableType,
+ Path: "noop/",
+ Type: "generic",
+ UUID: "abcd",
+ },
+ &MountEntry{
+ Table: mountTableType,
+ Path: "noop2/",
+ Type: "generic",
+ UUID: "bcde",
+ },
+ },
+ }
+
+ // Both should set up successfully
+ err := c.setupMounts()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(c.mounts.Entries) != 2 {
+ t.Fatalf("expected two entries, got %d", len(c.mounts.Entries))
+ }
+
+ rawLocal, err := c.barrier.Get(coreLocalMountConfigPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if rawLocal == nil {
+ t.Fatal("expected non-nil local mounts")
+ }
+ localMountsTable := &MountTable{}
+ if err := jsonutil.DecodeJSON(rawLocal.Value, localMountsTable); err != nil {
+ t.Fatal(err)
+ }
+ if len(localMountsTable.Entries) != 1 || localMountsTable.Entries[0].Type != "cubbyhole" {
+ t.Fatalf("expected only cubbyhole entry in local mount table, got %#v", localMountsTable)
+ }
+
+ c.mounts.Entries[1].Local = true
+ if err := c.persistMounts(c.mounts, false); err != nil {
+ t.Fatal(err)
+ }
+
+ rawLocal, err = c.barrier.Get(coreLocalMountConfigPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if rawLocal == nil {
+ t.Fatal("expected non-nil local mount")
+ }
+ localMountsTable = &MountTable{}
+ if err := jsonutil.DecodeJSON(rawLocal.Value, localMountsTable); err != nil {
+ t.Fatal(err)
+ }
+ // This requires some explanation: because we're directly munging the mount
+ // table, the table initially contains cubbyhole (as per above) when the
+ // core unseals, but we then overwrite it with our own table containing one
+ // local entry, so we should now only expect the noop2 entry
+ if len(localMountsTable.Entries) != 1 || localMountsTable.Entries[0].Path != "noop2/" {
+ t.Fatalf("expected one entry in local mount table, got %#v", localMountsTable)
+ }
+
+ oldMounts := c.mounts
+ if err := c.loadMounts(); err != nil {
+ t.Fatal(err)
+ }
+ compEntries := c.mounts.Entries[:0]
+ // Filter out required mounts
+ for _, v := range c.mounts.Entries {
+ if v.Type == "generic" {
+ compEntries = append(compEntries, v)
+ }
+ }
+ c.mounts.Entries = compEntries
+
+ if !reflect.DeepEqual(oldMounts, c.mounts) {
+ t.Fatalf("expected\n%#v\ngot\n%#v\n", oldMounts, c.mounts)
+ }
+
+ if len(c.mounts.Entries) != 2 {
+ t.Fatalf("expected two mount entries, got %#v", localMountsTable)
+ }
+}
+
+func TestCore_Unmount(t *testing.T) {
+ c, keys, _ := TestCoreUnsealed(t)
+ existed, err := c.unmount("secret")
+ if !existed || err != nil {
+ t.Fatalf("existed: %v; err: %v", existed, err)
+ }
+
+ match := c.router.MatchingMount("secret/foo")
+ if match != "" {
+ t.Fatalf("backend present")
+ }
+
+ conf := &CoreConfig{
+ Physical: c.physical,
+ DisableMlock: true,
+ }
+ c2, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for i, key := range keys {
+ unseal, err := TestCoreUnseal(c2, key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if i+1 == len(keys) && !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ }
+
+ // Verify matching mount tables
+ if !reflect.DeepEqual(c.mounts.sortEntriesByPath(), c2.mounts.sortEntriesByPath()) {
+ t.Fatalf("mismatch: %v %v", c.mounts, c2.mounts)
+ }
+}
+
+func TestCore_Unmount_Cleanup(t *testing.T) {
+ noop := &NoopBackend{}
+ c, _, root := TestCoreUnsealed(t)
+ c.logicalBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return noop, nil
+ }
+
+ // Mount the noop backend
+ me := &MountEntry{
+ Table: mountTableType,
+ Path: "test/",
+ Type: "noop",
+ }
+ if err := c.mount(me); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Store the view
+ view := c.router.MatchingStorageView("test/")
+
+ // Inject data
+ se := &logical.StorageEntry{
+ Key: "plstodelete",
+ Value: []byte("test"),
+ }
+ if err := view.Put(se); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Setup response
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ },
+ },
+ Data: map[string]interface{}{
+ "foo": "bar",
+ },
+ }
+ noop.Response = resp
+
+ // Generate leased secret
+ r := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "test/foo",
+ ClientToken: root,
+ }
+ resp, err := c.HandleRequest(r)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Unmount, this should cleanup
+ if existed, err := c.unmount("test/"); !existed || err != nil {
+ t.Fatalf("existed: %v; err: %v", existed, err)
+ }
+
+ // Rollback should be invoked
+ if noop.Requests[1].Operation != logical.RollbackOperation {
+ t.Fatalf("bad: %#v", noop.Requests)
+ }
+
+ // Revoke should be invoked
+ if noop.Requests[2].Operation != logical.RevokeOperation {
+ t.Fatalf("bad: %#v", noop.Requests)
+ }
+ if noop.Requests[2].Path != "foo" {
+ t.Fatalf("bad: %#v", noop.Requests)
+ }
+
+ // View should be empty
+ out, err := logical.CollectKeys(view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(out) != 0 {
+ t.Fatalf("bad: %#v", out)
+ }
+}
+
+func TestCore_Remount(t *testing.T) {
+ c, keys, _ := TestCoreUnsealed(t)
+ err := c.remount("secret", "foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ match := c.router.MatchingMount("foo/bar")
+ if match != "foo/" {
+ t.Fatalf("failed remount")
+ }
+
+ conf := &CoreConfig{
+ Physical: c.physical,
+ DisableMlock: true,
+ }
+ c2, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for i, key := range keys {
+ unseal, err := TestCoreUnseal(c2, key)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if i+1 == len(keys) && !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ }
+
+ // Verify matching mount tables
+ if !reflect.DeepEqual(c.mounts, c2.mounts) {
+ t.Fatalf("mismatch: %v %v", c.mounts, c2.mounts)
+ }
+}
+
+func TestCore_Remount_Cleanup(t *testing.T) {
+ noop := &NoopBackend{}
+ c, _, root := TestCoreUnsealed(t)
+ c.logicalBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return noop, nil
+ }
+
+ // Mount the noop backend
+ me := &MountEntry{
+ Table: mountTableType,
+ Path: "test/",
+ Type: "noop",
+ }
+ if err := c.mount(me); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Store the view
+ view := c.router.MatchingStorageView("test/")
+
+ // Inject data
+ se := &logical.StorageEntry{
+ Key: "plstokeep",
+ Value: []byte("test"),
+ }
+ if err := view.Put(se); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Setup response
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ },
+ },
+ Data: map[string]interface{}{
+ "foo": "bar",
+ },
+ }
+ noop.Response = resp
+
+ // Generate leased secret
+ r := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "test/foo",
+ ClientToken: root,
+ }
+ resp, err := c.HandleRequest(r)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp.Secret.LeaseID == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Remount, this should cleanup
+ if err := c.remount("test/", "new/"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Rollback should be invoked
+ if noop.Requests[1].Operation != logical.RollbackOperation {
+ t.Fatalf("bad: %#v", noop.Requests)
+ }
+
+ // Revoke should be invoked
+ if noop.Requests[2].Operation != logical.RevokeOperation {
+ t.Fatalf("bad: %#v", noop.Requests)
+ }
+ if noop.Requests[2].Path != "foo" {
+ t.Fatalf("bad: %#v", noop.Requests)
+ }
+
+ // View should not be empty
+ out, err := logical.CollectKeys(view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(out) != 1 || out[0] != "plstokeep" {
+ t.Fatalf("bad: %#v", out)
+ }
+}
+
+func TestCore_Remount_Protected(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+ err := c.remount("sys", "foo")
+ if err.Error() != "cannot remount 'sys/'" {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestDefaultMountTable(t *testing.T) {
+ table := defaultMountTable()
+ verifyDefaultTable(t, table)
+}
+
+func TestCore_MountTable_UpgradeToTyped(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+
+ c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
+ return &NoopAudit{
+ Config: config,
+ }, nil
+ }
+
+ me := &MountEntry{
+ Table: auditTableType,
+ Path: "foo",
+ Type: "noop",
+ }
+ err := c.enableAudit(me)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
+ return &NoopBackend{}, nil
+ }
+
+ me = &MountEntry{
+ Table: credentialTableType,
+ Path: "foo",
+ Type: "noop",
+ }
+ err = c.enableCredential(me)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ testCore_MountTable_UpgradeToTyped_Common(t, c, "mounts")
+ testCore_MountTable_UpgradeToTyped_Common(t, c, "audits")
+ testCore_MountTable_UpgradeToTyped_Common(t, c, "credentials")
+}
+
+func testCore_MountTable_UpgradeToTyped_Common(
+ t *testing.T,
+ c *Core,
+ testType string) {
+
+ var path string
+ var mt *MountTable
+ switch testType {
+ case "mounts":
+ path = coreMountConfigPath
+ mt = c.mounts
+ case "audits":
+ path = coreAuditConfigPath
+ mt = c.audit
+ case "credentials":
+ path = coreAuthConfigPath
+ mt = c.auth
+ }
+
+ // We filter out local entries here since the logic is rather dumb
+ // (straight JSON comparison) and doesn't deal well with the separate
+ // locations
+ newEntries := mt.Entries[:0]
+ for _, entry := range mt.Entries {
+ if !entry.Local {
+ newEntries = append(newEntries, entry)
+ }
+ }
+ mt.Entries = newEntries
+
+ // Save the expected table
+ goodJson, err := json.Marshal(mt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a pre-typed version
+ mt.Type = ""
+ for _, entry := range mt.Entries {
+ entry.Table = ""
+ }
+
+ raw, err := json.Marshal(mt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if reflect.DeepEqual(raw, goodJson) {
+ t.Fatalf("bad: values here should be different")
+ }
+
+ entry := &Entry{
+ Key: path,
+ Value: raw,
+ }
+ if err := c.barrier.Put(entry); err != nil {
+ t.Fatal(err)
+ }
+
+ var persistFunc func(*MountTable, bool) error
+
+ // It should load successfully and be upgraded and persisted
+ switch testType {
+ case "mounts":
+ err = c.loadMounts()
+ persistFunc = c.persistMounts
+ mt = c.mounts
+ case "credentials":
+ err = c.loadCredentials()
+ persistFunc = c.persistAuth
+ mt = c.auth
+ case "audits":
+ err = c.loadAudits()
+ persistFunc = c.persistAudit
+ mt = c.audit
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ entry, err = c.barrier.Get(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ decompressedBytes, uncompressed, err := compressutil.Decompress(entry.Value)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ actual := decompressedBytes
+ if uncompressed {
+ actual = entry.Value
+ }
+
+ if strings.TrimSpace(string(actual)) != strings.TrimSpace(string(goodJson)) {
+ t.Fatalf("bad: expected\n%s\nactual\n%s\n", string(goodJson), string(actual))
+ }
+
+ // Now try saving invalid versions
+ origTableType := mt.Type
+ mt.Type = "foo"
+ if err := persistFunc(mt, false); err == nil {
+ t.Fatal("expected error")
+ }
+
+ if len(mt.Entries) > 0 {
+ mt.Type = origTableType
+ mt.Entries[0].Table = "bar"
+ if err := persistFunc(mt, false); err == nil {
+ t.Fatal("expected error")
+ }
+
+ mt.Entries[0].Table = mt.Type
+ if err := persistFunc(mt, false); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func verifyDefaultTable(t *testing.T, table *MountTable) {
+ if len(table.Entries) != 3 {
+ t.Fatalf("bad: %v", table.Entries)
+ }
+ table.sortEntriesByPath()
+ for idx, entry := range table.Entries {
+ switch idx {
+ case 0:
+ if entry.Path != "cubbyhole/" {
+ t.Fatalf("bad: %v", entry)
+ }
+ if entry.Type != "cubbyhole" {
+ t.Fatalf("bad: %v", entry)
+ }
+ case 1:
+ if entry.Path != "secret/" {
+ t.Fatalf("bad: %v", entry)
+ }
+ if entry.Type != "generic" {
+ t.Fatalf("bad: %v", entry)
+ }
+ case 2:
+ if entry.Path != "sys/" {
+ t.Fatalf("bad: %v", entry)
+ }
+ if entry.Type != "system" {
+ t.Fatalf("bad: %v", entry)
+ }
+ }
+ if entry.Table != mountTableType {
+ t.Fatalf("bad: %v", entry)
+ }
+ if entry.Description == "" {
+ t.Fatalf("bad: %v", entry)
+ }
+ if entry.UUID == "" {
+ t.Fatalf("bad: %v", entry)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/policy.go b/vendor/github.com/hashicorp/vault/vault/policy.go
new file mode 100644
index 0000000..c808c2a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/policy.go
@@ -0,0 +1,267 @@
+package vault
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/vault/helper/parseutil"
+)
+
+const (
+ DenyCapability = "deny"
+ CreateCapability = "create"
+ ReadCapability = "read"
+ UpdateCapability = "update"
+ DeleteCapability = "delete"
+ ListCapability = "list"
+ SudoCapability = "sudo"
+ RootCapability = "root"
+
+ // Backwards compatibility
+ OldDenyPathPolicy = "deny"
+ OldReadPathPolicy = "read"
+ OldWritePathPolicy = "write"
+ OldSudoPathPolicy = "sudo"
+)
+
+const (
+ DenyCapabilityInt uint32 = 1 << iota
+ CreateCapabilityInt
+ ReadCapabilityInt
+ UpdateCapabilityInt
+ DeleteCapabilityInt
+ ListCapabilityInt
+ SudoCapabilityInt
+)
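+
+// The values above are bit flags, so a set of capabilities collapses into a
+// single uint32; an illustrative check (perms is an assumed *Permissions):
+//
+//	canRead := perms.CapabilitiesBitmap&ReadCapabilityInt != 0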
+
+var (
+ cap2Int = map[string]uint32{
+ DenyCapability: DenyCapabilityInt,
+ CreateCapability: CreateCapabilityInt,
+ ReadCapability: ReadCapabilityInt,
+ UpdateCapability: UpdateCapabilityInt,
+ DeleteCapability: DeleteCapabilityInt,
+ ListCapability: ListCapabilityInt,
+ SudoCapability: SudoCapabilityInt,
+ }
+)
+
+// Policy is used to represent the policy specified by
+// an ACL configuration.
+type Policy struct {
+ Name string `hcl:"name"`
+ Paths []*PathCapabilities `hcl:"-"`
+ Raw string
+}
+
+// PathCapabilities represents a policy for a path in the namespace.
+type PathCapabilities struct {
+ Prefix string
+ Policy string
+ Permissions *Permissions
+ Glob bool
+ Capabilities []string
+
+ // These keys are used at the top level to make the HCL nicer; the parsed
+ // values are stored in the Permissions object, though
+ MinWrappingTTLHCL interface{} `hcl:"min_wrapping_ttl"`
+ MaxWrappingTTLHCL interface{} `hcl:"max_wrapping_ttl"`
+ AllowedParametersHCL map[string][]interface{} `hcl:"allowed_parameters"`
+ DeniedParametersHCL map[string][]interface{} `hcl:"denied_parameters"`
+}
+
+type Permissions struct {
+ CapabilitiesBitmap uint32
+ MinWrappingTTL time.Duration
+ MaxWrappingTTL time.Duration
+ AllowedParameters map[string][]interface{}
+ DeniedParameters map[string][]interface{}
+}
+
+// Parse is used to parse the specified ACL rules into an
+// intermediary set of policies, before being compiled into
+// the ACL
+func Parse(rules string) (*Policy, error) {
+ // Parse the rules
+ root, err := hcl.Parse(rules)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse policy: %s", err)
+ }
+
+ // Top-level item should be the object list
+ list, ok := root.Node.(*ast.ObjectList)
+ if !ok {
+ return nil, fmt.Errorf("Failed to parse policy: does not contain a root object")
+ }
+
+ // Check for invalid top-level keys
+ valid := []string{
+ "name",
+ "path",
+ }
+ if err := checkHCLKeys(list, valid); err != nil {
+ return nil, fmt.Errorf("Failed to parse policy: %s", err)
+ }
+
+ // Create the initial policy and store the raw text of the rules
+ var p Policy
+ p.Raw = rules
+ if err := hcl.DecodeObject(&p, list); err != nil {
+ return nil, fmt.Errorf("Failed to parse policy: %s", err)
+ }
+
+ if o := list.Filter("path"); len(o.Items) > 0 {
+ if err := parsePaths(&p, o); err != nil {
+ return nil, fmt.Errorf("Failed to parse policy: %s", err)
+ }
+ }
+
+ return &p, nil
+}
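+
+// A minimal usage sketch of Parse; the policy text is illustrative:
+//
+//	p, err := Parse(`path "secret/*" { capabilities = ["read", "list"] }`)
+//	if err != nil {
+//		// malformed HCL, invalid keys, or unknown capabilities
+//	}
+//	_ = p.Paths[0].Permissions.CapabilitiesBitmap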
+
+func parsePaths(result *Policy, list *ast.ObjectList) error {
+ paths := make([]*PathCapabilities, 0, len(list.Items))
+ for _, item := range list.Items {
+ key := "path"
+ if len(item.Keys) > 0 {
+ key = item.Keys[0].Token.Value().(string)
+ }
+ valid := []string{
+ "policy",
+ "capabilities",
+ "allowed_parameters",
+ "denied_parameters",
+ "min_wrapping_ttl",
+ "max_wrapping_ttl",
+ }
+ if err := checkHCLKeys(item.Val, valid); err != nil {
+ return multierror.Prefix(err, fmt.Sprintf("path %q:", key))
+ }
+
+ var pc PathCapabilities
+
+ // allocate memory so that DecodeObject can initialize the Permissions struct
+ pc.Permissions = new(Permissions)
+
+ pc.Prefix = key
+ if err := hcl.DecodeObject(&pc, item.Val); err != nil {
+ return multierror.Prefix(err, fmt.Sprintf("path %q:", key))
+ }
+
+ // Strip a leading '/' as paths in Vault start after the / in the API path
+ if len(pc.Prefix) > 0 && pc.Prefix[0] == '/' {
+ pc.Prefix = pc.Prefix[1:]
+ }
+
+ // Strip the glob character if found
+ if strings.HasSuffix(pc.Prefix, "*") {
+ pc.Prefix = strings.TrimSuffix(pc.Prefix, "*")
+ pc.Glob = true
+ }
+
+ // Map old-style policies into capabilities
+ if len(pc.Policy) > 0 {
+ switch pc.Policy {
+ case OldDenyPathPolicy:
+ pc.Capabilities = []string{DenyCapability}
+ case OldReadPathPolicy:
+ pc.Capabilities = append(pc.Capabilities, []string{ReadCapability, ListCapability}...)
+ case OldWritePathPolicy:
+ pc.Capabilities = append(pc.Capabilities, []string{CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability}...)
+ case OldSudoPathPolicy:
+ pc.Capabilities = append(pc.Capabilities, []string{CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability, SudoCapability}...)
+ default:
+ return fmt.Errorf("path %q: invalid policy '%s'", key, pc.Policy)
+ }
+ }
+
+ // Initialize the map
+ pc.Permissions.CapabilitiesBitmap = 0
+ for _, cap := range pc.Capabilities {
+ switch cap {
+ // If it's deny, don't include any other capability
+ case DenyCapability:
+ pc.Capabilities = []string{DenyCapability}
+ pc.Permissions.CapabilitiesBitmap = DenyCapabilityInt
+ goto PathFinished
+ case CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability, SudoCapability:
+ pc.Permissions.CapabilitiesBitmap |= cap2Int[cap]
+ default:
+ return fmt.Errorf("path %q: invalid capability '%s'", key, cap)
+ }
+ }
+
+ if pc.AllowedParametersHCL != nil {
+ pc.Permissions.AllowedParameters = make(map[string][]interface{}, len(pc.AllowedParametersHCL))
+ for key, val := range pc.AllowedParametersHCL {
+ pc.Permissions.AllowedParameters[strings.ToLower(key)] = val
+ }
+ }
+ if pc.DeniedParametersHCL != nil {
+ pc.Permissions.DeniedParameters = make(map[string][]interface{}, len(pc.DeniedParametersHCL))
+
+ for key, val := range pc.DeniedParametersHCL {
+ pc.Permissions.DeniedParameters[strings.ToLower(key)] = val
+ }
+ }
+ if pc.MinWrappingTTLHCL != nil {
+ dur, err := parseutil.ParseDurationSecond(pc.MinWrappingTTLHCL)
+ if err != nil {
+ return errwrap.Wrapf("error parsing min_wrapping_ttl: {{err}}", err)
+ }
+ pc.Permissions.MinWrappingTTL = dur
+ }
+ if pc.MaxWrappingTTLHCL != nil {
+ dur, err := parseutil.ParseDurationSecond(pc.MaxWrappingTTLHCL)
+ if err != nil {
+ return errwrap.Wrapf("error parsing max_wrapping_ttl: {{err}}", err)
+ }
+ pc.Permissions.MaxWrappingTTL = dur
+ }
+ if pc.Permissions.MinWrappingTTL != 0 &&
+ pc.Permissions.MaxWrappingTTL != 0 &&
+ pc.Permissions.MaxWrappingTTL < pc.Permissions.MinWrappingTTL {
+ return errors.New("max_wrapping_ttl cannot be less than min_wrapping_ttl")
+ }
+
+ PathFinished:
+ paths = append(paths, &pc)
+ }
+
+ result.Paths = paths
+ return nil
+}
+
+func checkHCLKeys(node ast.Node, valid []string) error {
+ var list *ast.ObjectList
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ list = n
+ case *ast.ObjectType:
+ list = n.List
+ default:
+ return fmt.Errorf("cannot check HCL keys of type %T", n)
+ }
+
+ validMap := make(map[string]struct{}, len(valid))
+ for _, v := range valid {
+ validMap[v] = struct{}{}
+ }
+
+ var result error
+ for _, item := range list.Items {
+ key := item.Keys[0].Token.Value().(string)
+ if _, ok := validMap[key]; !ok {
+ result = multierror.Append(result, fmt.Errorf(
+ "invalid key '%s' on line %d", key, item.Assign.Line))
+ }
+ }
+
+ return result
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/policy_store.go b/vendor/github.com/hashicorp/vault/vault/policy_store.go
new file mode 100644
index 0000000..0768f76
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/policy_store.go
@@ -0,0 +1,389 @@
+package vault
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/armon/go-metrics"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/golang-lru"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+const (
+ // policySubPath is the sub-path used for the policy store
+ // view. This is nested under the system view.
+ policySubPath = "policy/"
+
+ // policyCacheSize is the number of policies that are kept cached
+ policyCacheSize = 1024
+
+ // responseWrappingPolicyName is the name of the fixed policy
+ responseWrappingPolicyName = "response-wrapping"
+
+ // responseWrappingPolicy is the policy that ensures cubbyhole response
+ // wrapping can always succeed. Note that sys/wrapping/lookup isn't
+ // contained here because using it would revoke the token anyway, so there
+ // isn't much point.
+ responseWrappingPolicy = `
+path "cubbyhole/response" {
+ capabilities = ["create", "read"]
+}
+
+path "sys/wrapping/unwrap" {
+ capabilities = ["update"]
+}
+`
+
+ // defaultPolicy is the "default" policy
+ defaultPolicy = `
+# Allow tokens to look up their own properties
+path "auth/token/lookup-self" {
+ capabilities = ["read"]
+}
+
+# Allow tokens to renew themselves
+path "auth/token/renew-self" {
+ capabilities = ["update"]
+}
+
+# Allow tokens to revoke themselves
+path "auth/token/revoke-self" {
+ capabilities = ["update"]
+}
+
+# Allow a token to look up its own capabilities on a path
+path "sys/capabilities-self" {
+ capabilities = ["update"]
+}
+
+# Allow a token to renew a lease via lease_id in the request body; old path for
+# old clients, new path for newer
+path "sys/renew" {
+ capabilities = ["update"]
+}
+path "sys/leases/renew" {
+ capabilities = ["update"]
+}
+
+# Allow looking up lease properties. This requires knowing the lease ID ahead
+# of time and does not divulge any sensitive information.
+path "sys/leases/lookup" {
+ capabilities = ["update"]
+}
+
+# Allow a token to manage its own cubbyhole
+path "cubbyhole/*" {
+ capabilities = ["create", "read", "update", "delete", "list"]
+}
+
+# Allow a token to wrap arbitrary values in a response-wrapping token
+path "sys/wrapping/wrap" {
+ capabilities = ["update"]
+}
+
+# Allow a token to look up the creation time and TTL of a given
+# response-wrapping token
+path "sys/wrapping/lookup" {
+ capabilities = ["update"]
+}
+
+# Allow a token to unwrap a response-wrapping token. This is a convenience to
+# avoid client token swapping since this is also part of the response wrapping
+# policy.
+path "sys/wrapping/unwrap" {
+ capabilities = ["update"]
+}
+`
+)
+
+var (
+ immutablePolicies = []string{
+ "root",
+ responseWrappingPolicyName,
+ }
+ nonAssignablePolicies = []string{
+ responseWrappingPolicyName,
+ }
+)
+
+// PolicyStore is used to provide durable storage of policies, and to
+// manage the ACLs associated with them.
+type PolicyStore struct {
+ view *BarrierView
+ lru *lru.TwoQueueCache
+}
+
+// PolicyEntry is used to store a policy by name
+type PolicyEntry struct {
+ Version int
+ Raw string
+}
+
+// NewPolicyStore creates a new PolicyStore that is backed
+// by a given view. It is used to durably store and manage named policies.
+func NewPolicyStore(view *BarrierView, system logical.SystemView) *PolicyStore {
+ p := &PolicyStore{
+ view: view,
+ }
+ if !system.CachingDisabled() {
+ cache, _ := lru.New2Q(policyCacheSize)
+ p.lru = cache
+ }
+
+ return p
+}
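+
+// A usage sketch under assumed inputs (view and system come from an unsealed
+// core, as in setupPolicyStore below):
+//
+//	ps := NewPolicyStore(view, system)
+//	policy, err := ps.GetPolicy("default")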
+
+// setupPolicyStore is used to initialize the policy store
+// when the vault is being unsealed.
+func (c *Core) setupPolicyStore() error {
+ // Create a sub-view
+ view := c.systemBarrierView.SubView(policySubPath)
+
+ // Create the policy store
+ sysView := &dynamicSystemView{core: c}
+ c.policyStore = NewPolicyStore(view, sysView)
+
+ if sysView.ReplicationState() == consts.ReplicationSecondary {
+ // Policies will sync from the primary
+ return nil
+ }
+
+ // Ensure that the default policy exists, and if not, create it
+ policy, err := c.policyStore.GetPolicy("default")
+ if err != nil {
+ return errwrap.Wrapf("error fetching default policy from store: {{err}}", err)
+ }
+ if policy == nil {
+ err := c.policyStore.createDefaultPolicy()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Ensure that the cubbyhole response wrapping policy exists
+ policy, err = c.policyStore.GetPolicy(responseWrappingPolicyName)
+ if err != nil {
+ return errwrap.Wrapf("error fetching response-wrapping policy from store: {{err}}", err)
+ }
+ if policy == nil || policy.Raw != responseWrappingPolicy {
+ err := c.policyStore.createResponseWrappingPolicy()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// teardownPolicyStore is used to reverse setupPolicyStore
+// when the vault is being sealed.
+func (c *Core) teardownPolicyStore() error {
+ c.policyStore = nil
+ return nil
+}
+
+func (ps *PolicyStore) invalidate(name string) {
+ if ps.lru == nil {
+ // Nothing to do if the cache is not used
+ return
+ }
+
+ // This may come with a prefixed "/" due to joining the file path
+ ps.lru.Remove(strings.TrimPrefix(name, "/"))
+}
+
+// SetPolicy is used to create or update the given policy
+func (ps *PolicyStore) SetPolicy(p *Policy) error {
+ defer metrics.MeasureSince([]string{"policy", "set_policy"}, time.Now())
+ if p.Name == "" {
+ return fmt.Errorf("policy name missing")
+ }
+ if strutil.StrListContains(immutablePolicies, p.Name) {
+ return fmt.Errorf("cannot update %s policy", p.Name)
+ }
+
+ return ps.setPolicyInternal(p)
+}
+
+func (ps *PolicyStore) setPolicyInternal(p *Policy) error {
+ // Create the entry
+ entry, err := logical.StorageEntryJSON(p.Name, &PolicyEntry{
+ Version: 2,
+ Raw: p.Raw,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to create entry: %v", err)
+ }
+ if err := ps.view.Put(entry); err != nil {
+ return fmt.Errorf("failed to persist policy: %v", err)
+ }
+
+ if ps.lru != nil {
+ // Update the LRU cache
+ ps.lru.Add(p.Name, p)
+ }
+ return nil
+}
+
+// GetPolicy is used to fetch the named policy
+func (ps *PolicyStore) GetPolicy(name string) (*Policy, error) {
+ defer metrics.MeasureSince([]string{"policy", "get_policy"}, time.Now())
+ if ps.lru != nil {
+ // Check for cached policy
+ if raw, ok := ps.lru.Get(name); ok {
+ return raw.(*Policy), nil
+ }
+ }
+
+ // Special case the root policy
+ if name == "root" {
+ p := &Policy{Name: "root"}
+ if ps.lru != nil {
+ ps.lru.Add(p.Name, p)
+ }
+ return p, nil
+ }
+
+ // Load the policy in
+ out, err := ps.view.Get(name)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read policy: %v", err)
+ }
+ if out == nil {
+ return nil, nil
+ }
+
+ // In Vault 0.1.X we stored the raw policy, but in
+ // Vault 0.2 we switched to the PolicyEntry
+ policyEntry := new(PolicyEntry)
+ var policy *Policy
+ if err := out.DecodeJSON(policyEntry); err == nil {
+ // Parse normally
+ p, err := Parse(policyEntry.Raw)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse policy: %v", err)
+ }
+ p.Name = name
+ policy = p
+
+ } else {
+ // On error, attempt to use V1 parsing
+ p, err := Parse(string(out.Value))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse policy: %v", err)
+ }
+ p.Name = name
+
+ // V1 used implicit glob, we need to do a fix-up
+ for _, pp := range p.Paths {
+ pp.Glob = true
+ }
+ policy = p
+ }
+
+ if ps.lru != nil {
+ // Update the LRU cache
+ ps.lru.Add(name, policy)
+ }
+
+ return policy, nil
+}
+
+// ListPolicies is used to list the available policies
+func (ps *PolicyStore) ListPolicies() ([]string, error) {
+ defer metrics.MeasureSince([]string{"policy", "list_policies"}, time.Now())
+ // Scan the view, since the policy names are the same as the
+ // key names.
+ keys, err := logical.CollectKeys(ps.view)
+
+ for _, nonAssignable := range nonAssignablePolicies {
+ deleteIndex := -1
+ // Find indices of non-assignable policies in keys
+ for index, key := range keys {
+ if key == nonAssignable {
+ // Record the index; delete from the collection outside the loop
+ deleteIndex = index
+ break
+ }
+ }
+ // Remove non-assignable policies when found
+ if deleteIndex != -1 {
+ keys = append(keys[:deleteIndex], keys[deleteIndex+1:]...)
+ }
+ }
+
+ return keys, err
+}
+
+// DeletePolicy is used to delete the named policy
+func (ps *PolicyStore) DeletePolicy(name string) error {
+ defer metrics.MeasureSince([]string{"policy", "delete_policy"}, time.Now())
+ if strutil.StrListContains(immutablePolicies, name) {
+ return fmt.Errorf("cannot delete %s policy", name)
+ }
+ if name == "default" {
+ return fmt.Errorf("cannot delete default policy")
+ }
+ if err := ps.view.Delete(name); err != nil {
+ return fmt.Errorf("failed to delete policy: %v", err)
+ }
+
+ if ps.lru != nil {
+ // Clear the cache
+ ps.lru.Remove(name)
+ }
+ return nil
+}
+
+// ACL is used to return an ACL which is built using the
+// named policies.
+func (ps *PolicyStore) ACL(names ...string) (*ACL, error) {
+ // Fetch the policies
+ var policy []*Policy
+ for _, name := range names {
+ p, err := ps.GetPolicy(name)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get policy '%s': %v", name, err)
+ }
+ policy = append(policy, p)
+ }
+
+ // Construct the ACL
+ acl, err := NewACL(policy)
+ if err != nil {
+ return nil, fmt.Errorf("failed to construct ACL: %v", err)
+ }
+ return acl, nil
+}
+
+func (ps *PolicyStore) createDefaultPolicy() error {
+ policy, err := Parse(defaultPolicy)
+ if err != nil {
+ return errwrap.Wrapf("error parsing default policy: {{err}}", err)
+ }
+
+ if policy == nil {
+ return fmt.Errorf("parsing default policy resulted in nil policy")
+ }
+
+ policy.Name = "default"
+ return ps.setPolicyInternal(policy)
+}
+
+func (ps *PolicyStore) createResponseWrappingPolicy() error {
+ policy, err := Parse(responseWrappingPolicy)
+ if err != nil {
+ return errwrap.Wrapf(fmt.Sprintf("error parsing %s policy: {{err}}", responseWrappingPolicyName), err)
+ }
+
+ if policy == nil {
+ return fmt.Errorf("parsing %s policy resulted in nil policy", responseWrappingPolicyName)
+ }
+
+ policy.Name = responseWrappingPolicyName
+ return ps.setPolicyInternal(policy)
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/policy_store_test.go b/vendor/github.com/hashicorp/vault/vault/policy_store_test.go
new file mode 100644
index 0000000..dafca34
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/policy_store_test.go
@@ -0,0 +1,218 @@
+package vault
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func mockPolicyStore(t *testing.T) *PolicyStore {
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "foo/")
+ p := NewPolicyStore(view, logical.TestSystemView())
+ return p
+}
+
+func mockPolicyStoreNoCache(t *testing.T) *PolicyStore {
+ sysView := logical.TestSystemView()
+ sysView.CachingDisabledVal = true
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "foo/")
+ p := NewPolicyStore(view, sysView)
+ return p
+}
+
+func TestPolicyStore_Root(t *testing.T) {
+ ps := mockPolicyStore(t)
+
+ // Get should return a special policy
+ p, err := ps.GetPolicy("root")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if p == nil {
+ t.Fatalf("bad: %v", p)
+ }
+ if p.Name != "root" {
+ t.Fatalf("bad: %v", p)
+ }
+
+ // Set should fail
+ err = ps.SetPolicy(p)
+ if err.Error() != "cannot update root policy" {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Delete should fail
+ err = ps.DeletePolicy("root")
+ if err.Error() != "cannot delete root policy" {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestPolicyStore_CRUD(t *testing.T) {
+ ps := mockPolicyStore(t)
+ testPolicyStore_CRUD(t, ps)
+
+ ps = mockPolicyStoreNoCache(t)
+ testPolicyStore_CRUD(t, ps)
+}
+
+func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) {
+ // Get should return nothing
+ p, err := ps.GetPolicy("dev")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if p != nil {
+ t.Fatalf("bad: %v", p)
+ }
+
+ // Delete should be no-op
+ err = ps.DeletePolicy("dev")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // List should be blank
+ out, err := ps.ListPolicies()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(out) != 0 {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Set should work
+ policy, _ := Parse(aclPolicy)
+ err = ps.SetPolicy(policy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get should work
+ p, err = ps.GetPolicy("dev")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(p, policy) {
+ t.Fatalf("bad: %v", p)
+ }
+
+ // List should be one element
+ out, err = ps.ListPolicies()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(out) != 1 || out[0] != "dev" {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Delete should clear the entry
+ err = ps.DeletePolicy("dev")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get should fail
+ p, err = ps.GetPolicy("dev")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if p != nil {
+ t.Fatalf("bad: %v", p)
+ }
+}
+
+// Test predefined policy handling
+func TestPolicyStore_Predefined(t *testing.T) {
+ core, _, _ := TestCoreUnsealed(t)
+ // Ensure both default policies are created
+ err := core.setupPolicyStore()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ // List should be two elements
+ out, err := core.policyStore.ListPolicies()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ // This shouldn't contain response-wrapping since it's non-assignable
+ if len(out) != 1 || out[0] != "default" {
+ t.Fatalf("bad: %v", out)
+ }
+
+ pCubby, err := core.policyStore.GetPolicy("response-wrapping")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pCubby.Raw != responseWrappingPolicy {
+ t.Fatalf("bad: expected\n%s\ngot\n%s\n", responseWrappingPolicy, pCubby.Raw)
+ }
+ pRoot, err := core.policyStore.GetPolicy("root")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ err = core.policyStore.SetPolicy(pCubby)
+ if err == nil {
+ t.Fatalf("expected err setting %s", pCubby.Name)
+ }
+ err = core.policyStore.SetPolicy(pRoot)
+ if err == nil {
+ t.Fatalf("expected err setting %s", pRoot.Name)
+ }
+ err = core.policyStore.DeletePolicy(pCubby.Name)
+ if err == nil {
+ t.Fatalf("expected err deleting %s", pCubby.Name)
+ }
+ err = core.policyStore.DeletePolicy(pRoot.Name)
+ if err == nil {
+ t.Fatalf("expected err deleting %s", pRoot.Name)
+ }
+}
+
+func TestPolicyStore_ACL(t *testing.T) {
+ ps := mockPolicyStore(t)
+
+ policy, _ := Parse(aclPolicy)
+ err := ps.SetPolicy(policy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ policy, _ = Parse(aclPolicy2)
+ err = ps.SetPolicy(policy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ acl, err := ps.ACL("dev", "ops")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ testLayeredACL(t, acl)
+}
+
+func TestPolicyStore_v1Upgrade(t *testing.T) {
+ ps := mockPolicyStore(t)
+
+ // Put a V1 record
+ raw := `path "foo" { policy = "read" }`
+ ps.view.Put(&logical.StorageEntry{Key: "old", Value: []byte(raw)})
+
+ // Do a read
+ p, err := ps.GetPolicy("old")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if p == nil || len(p.Paths) != 1 {
+ t.Fatalf("bad policy: %#v", p)
+ }
+
+ // Check that glob is enabled
+ if !p.Paths[0].Glob {
+ t.Fatalf("should enable glob")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/policy_test.go b/vendor/github.com/hashicorp/vault/vault/policy_test.go
new file mode 100644
index 0000000..123844a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/policy_test.go
@@ -0,0 +1,314 @@
+package vault
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+var rawPolicy = strings.TrimSpace(`
+# Developer policy
+name = "dev"
+
+# Deny all paths by default
+path "*" {
+ policy = "deny"
+}
+
+# Allow full access to staging
+path "stage/*" {
+ policy = "sudo"
+}
+
+# Limited read privilege to production
+path "prod/version" {
+ policy = "read"
+}
+
+# Read access to foobar
+# Also tests stripping of leading slash and parsing of min/max as string and
+# integer
+path "/foo/bar" {
+ policy = "read"
+ min_wrapping_ttl = 300
+ max_wrapping_ttl = "1h"
+}
+
+# Add capabilities for creation and sudo to foobar
+# These stanzas stay separate; they are combined when compiled into an ACL
+# Also tests reverse string/int handling to the above
+path "foo/bar" {
+ capabilities = ["create", "sudo"]
+ min_wrapping_ttl = "300s"
+ max_wrapping_ttl = 3600
+}
+
+# Check that only allowed_parameters are being added to foobar
+path "foo/bar" {
+ capabilities = ["create", "sudo"]
+ allowed_parameters = {
+ "zip" = []
+ "zap" = []
+ }
+}
+
+# Check that only denied_parameters are being added to bazbar
+path "baz/bar" {
+ capabilities = ["create", "sudo"]
+ denied_parameters = {
+ "zip" = []
+ "zap" = []
+ }
+}
+
+# Check that both allowed and denied parameters are being added to bizbar
+path "biz/bar" {
+ capabilities = ["create", "sudo"]
+ allowed_parameters = {
+ "zim" = []
+ "zam" = []
+ }
+ denied_parameters = {
+ "zip" = []
+ "zap" = []
+ }
+}
+path "test/types" {
+ capabilities = ["create", "sudo"]
+ allowed_parameters = {
+ "map" = [{"good" = "one"}]
+ "int" = [1, 2]
+ }
+ denied_parameters = {
+ "string" = ["test"]
+ "bool" = [false]
+ }
+}
+`)
+
+func TestPolicy_Parse(t *testing.T) {
+ p, err := Parse(rawPolicy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if p.Name != "dev" {
+ t.Fatalf("bad name: %q", p.Name)
+ }
+
+ expect := []*PathCapabilities{
+ &PathCapabilities{
+ Prefix: "",
+ Policy: "deny",
+ Capabilities: []string{
+ "deny",
+ },
+ Permissions: &Permissions{CapabilitiesBitmap: DenyCapabilityInt},
+ Glob: true,
+ },
+ &PathCapabilities{
+ Prefix: "stage/",
+ Policy: "sudo",
+ Capabilities: []string{
+ "create",
+ "read",
+ "update",
+ "delete",
+ "list",
+ "sudo",
+ },
+ Permissions: &Permissions{
+ CapabilitiesBitmap: (CreateCapabilityInt | ReadCapabilityInt | UpdateCapabilityInt | DeleteCapabilityInt | ListCapabilityInt | SudoCapabilityInt),
+ },
+ Glob: true,
+ },
+ &PathCapabilities{
+ Prefix: "prod/version",
+ Policy: "read",
+ Capabilities: []string{
+ "read",
+ "list",
+ },
+ Permissions: &Permissions{CapabilitiesBitmap: (ReadCapabilityInt | ListCapabilityInt)},
+ Glob: false,
+ },
+ &PathCapabilities{
+ Prefix: "foo/bar",
+ Policy: "read",
+ Capabilities: []string{
+ "read",
+ "list",
+ },
+ MinWrappingTTLHCL: 300,
+ MaxWrappingTTLHCL: "1h",
+ Permissions: &Permissions{
+ CapabilitiesBitmap: (ReadCapabilityInt | ListCapabilityInt),
+ MinWrappingTTL: 300 * time.Second,
+ MaxWrappingTTL: 3600 * time.Second,
+ },
+ Glob: false,
+ },
+ &PathCapabilities{
+ Prefix: "foo/bar",
+ Policy: "",
+ Capabilities: []string{
+ "create",
+ "sudo",
+ },
+ MinWrappingTTLHCL: "300s",
+ MaxWrappingTTLHCL: 3600,
+ Permissions: &Permissions{
+ CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
+ MinWrappingTTL: 300 * time.Second,
+ MaxWrappingTTL: 3600 * time.Second,
+ },
+ Glob: false,
+ },
+ &PathCapabilities{
+ Prefix: "foo/bar",
+ Policy: "",
+ Capabilities: []string{
+ "create",
+ "sudo",
+ },
+ AllowedParametersHCL: map[string][]interface{}{"zip": {}, "zap": {}},
+ Permissions: &Permissions{
+ CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
+ AllowedParameters: map[string][]interface{}{"zip": {}, "zap": {}},
+ },
+ Glob: false,
+ },
+ &PathCapabilities{
+ Prefix: "baz/bar",
+ Policy: "",
+ Capabilities: []string{
+ "create",
+ "sudo",
+ },
+ DeniedParametersHCL: map[string][]interface{}{"zip": []interface{}{}, "zap": []interface{}{}},
+ Permissions: &Permissions{
+ CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
+ DeniedParameters: map[string][]interface{}{"zip": []interface{}{}, "zap": []interface{}{}},
+ },
+ Glob: false,
+ },
+ &PathCapabilities{
+ Prefix: "biz/bar",
+ Policy: "",
+ Capabilities: []string{
+ "create",
+ "sudo",
+ },
+ AllowedParametersHCL: map[string][]interface{}{"zim": {}, "zam": {}},
+ DeniedParametersHCL: map[string][]interface{}{"zip": {}, "zap": {}},
+ Permissions: &Permissions{
+ CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
+ AllowedParameters: map[string][]interface{}{"zim": {}, "zam": {}},
+ DeniedParameters: map[string][]interface{}{"zip": {}, "zap": {}},
+ },
+ Glob: false,
+ },
+ &PathCapabilities{
+ Prefix: "test/types",
+ Policy: "",
+ Capabilities: []string{
+ "create",
+ "sudo",
+ },
+ AllowedParametersHCL: map[string][]interface{}{"map": []interface{}{map[string]interface{}{"good": "one"}}, "int": []interface{}{1, 2}},
+ DeniedParametersHCL: map[string][]interface{}{"string": []interface{}{"test"}, "bool": []interface{}{false}},
+ Permissions: &Permissions{
+ CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
+ AllowedParameters: map[string][]interface{}{"map": []interface{}{map[string]interface{}{"good": "one"}}, "int": []interface{}{1, 2}},
+ DeniedParameters: map[string][]interface{}{"string": []interface{}{"test"}, "bool": []interface{}{false}},
+ },
+ Glob: false,
+ },
+ }
+ if !reflect.DeepEqual(p.Paths, expect) {
+ t.Errorf("expected \n\n%#v\n\n to be \n\n%#v\n\n", p.Paths, expect)
+ }
+}
+
+func TestPolicy_ParseBadRoot(t *testing.T) {
+ _, err := Parse(strings.TrimSpace(`
+name = "test"
+bad = "foo"
+nope = "yes"
+`))
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ if !strings.Contains(err.Error(), "invalid key 'bad' on line 2") {
+ t.Errorf("bad error: %q", err)
+ }
+
+ if !strings.Contains(err.Error(), "invalid key 'nope' on line 3") {
+ t.Errorf("bad error: %q", err)
+ }
+}
+
+func TestPolicy_ParseBadPath(t *testing.T) {
+ _, err := Parse(strings.TrimSpace(`
+path "/" {
+ capabilities = ["read"]
+ capabilites = ["read"]
+}
+`))
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ if !strings.Contains(err.Error(), "invalid key 'capabilites' on line 3") {
+ t.Errorf("bad error: %s", err)
+ }
+}
+
+func TestPolicy_ParseBadPolicy(t *testing.T) {
+ _, err := Parse(strings.TrimSpace(`
+path "/" {
+ policy = "banana"
+}
+`))
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ if !strings.Contains(err.Error(), `path "/": invalid policy 'banana'`) {
+ t.Errorf("bad error: %s", err)
+ }
+}
+
+func TestPolicy_ParseBadWrapping(t *testing.T) {
+ _, err := Parse(strings.TrimSpace(`
+path "/" {
+ policy = "read"
+ min_wrapping_ttl = 400
+ max_wrapping_ttl = 200
+}
+`))
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ if !strings.Contains(err.Error(), `max_wrapping_ttl cannot be less than min_wrapping_ttl`) {
+ t.Errorf("bad error: %s", err)
+ }
+}
+
+func TestPolicy_ParseBadCapabilities(t *testing.T) {
+ _, err := Parse(strings.TrimSpace(`
+path "/" {
+ capabilities = ["read", "banana"]
+}
+`))
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ if !strings.Contains(err.Error(), `path "/": invalid capability 'banana'`) {
+ t.Errorf("bad error: %s", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/rekey.go b/vendor/github.com/hashicorp/vault/vault/rekey.go
new file mode 100644
index 0000000..adebf09
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/rekey.go
@@ -0,0 +1,689 @@
+package vault
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/pgpkeys"
+ "github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/shamir"
+)
+
+const (
+ // coreBarrierUnsealKeysBackupPath is the path used to back up encrypted
+ // unseal keys if specified during a rekey operation. This is outside of the
+ // barrier.
+ coreBarrierUnsealKeysBackupPath = "core/unseal-keys-backup"
+
+ // coreRecoveryUnsealKeysBackupPath is the path used to back up encrypted
+ // recovery keys if specified during a rekey operation. This is outside of
+ // the barrier.
+ coreRecoveryUnsealKeysBackupPath = "core/recovery-keys-backup"
+)
+
+// RekeyResult is used to provide the key parts back after
+// they are generated as part of the rekey.
+type RekeyResult struct {
+ SecretShares [][]byte
+ PGPFingerprints []string
+ Backup bool
+ RecoveryKey bool
+}
+
+// RekeyBackup stores the backup copy of PGP-encrypted keys
+type RekeyBackup struct {
+ Nonce string
+ Keys map[string][]string
+}
+
+func (c *Core) RekeyThreshold(recovery bool) (int, error) {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return 0, consts.ErrSealed
+ }
+ if c.standby {
+ return 0, consts.ErrStandby
+ }
+
+ c.rekeyLock.RLock()
+ defer c.rekeyLock.RUnlock()
+
+ var config *SealConfig
+ var err error
+ if recovery {
+ config, err = c.seal.RecoveryConfig()
+ } else {
+ config, err = c.seal.BarrierConfig()
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ return config.SecretThreshold, nil
+}
+
+// RekeyProgress is used to return the rekey progress (num shares)
+func (c *Core) RekeyProgress(recovery bool) (int, error) {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return 0, consts.ErrSealed
+ }
+ if c.standby {
+ return 0, consts.ErrStandby
+ }
+
+ c.rekeyLock.RLock()
+ defer c.rekeyLock.RUnlock()
+
+ if recovery {
+ return len(c.recoveryRekeyProgress), nil
+ }
+ return len(c.barrierRekeyProgress), nil
+}
+
+// RekeyConfig is used to read the rekey configuration
+func (c *Core) RekeyConfig(recovery bool) (*SealConfig, error) {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return nil, consts.ErrSealed
+ }
+ if c.standby {
+ return nil, consts.ErrStandby
+ }
+
+ c.rekeyLock.Lock()
+ defer c.rekeyLock.Unlock()
+
+ // Copy the seal config if any
+ var conf *SealConfig
+ if recovery {
+ if c.recoveryRekeyConfig != nil {
+ conf = c.recoveryRekeyConfig.Clone()
+ }
+ } else {
+ if c.barrierRekeyConfig != nil {
+ conf = c.barrierRekeyConfig.Clone()
+ }
+ }
+
+ return conf, nil
+}
+
+func (c *Core) RekeyInit(config *SealConfig, recovery bool) error {
+ if recovery {
+ return c.RecoveryRekeyInit(config)
+ }
+ return c.BarrierRekeyInit(config)
+}
+
+// BarrierRekeyInit is used to initialize the rekey settings for the barrier key
+func (c *Core) BarrierRekeyInit(config *SealConfig) error {
+ if config.StoredShares > 0 {
+ if !c.seal.StoredKeysSupported() {
+ return fmt.Errorf("storing keys not supported by barrier seal")
+ }
+ if len(config.PGPKeys) > 0 {
+ return fmt.Errorf("PGP key encryption not supported when using stored keys")
+ }
+ if config.Backup {
+ return fmt.Errorf("key backup not supported when using stored keys")
+ }
+ }
+
+ // Check if the seal configuration is valid
+ if err := config.Validate(); err != nil {
+ c.logger.Error("core: invalid rekey seal configuration", "error", err)
+ return fmt.Errorf("invalid rekey seal configuration: %v", err)
+ }
+
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return consts.ErrSealed
+ }
+ if c.standby {
+ return consts.ErrStandby
+ }
+
+ c.rekeyLock.Lock()
+ defer c.rekeyLock.Unlock()
+
+ // Prevent multiple concurrent re-keys
+ if c.barrierRekeyConfig != nil {
+ return fmt.Errorf("rekey already in progress")
+ }
+
+ // Copy the configuration
+ c.barrierRekeyConfig = config.Clone()
+
+ // Initialize the nonce
+ nonce, err := uuid.GenerateUUID()
+ if err != nil {
+ c.barrierRekeyConfig = nil
+ return err
+ }
+ c.barrierRekeyConfig.Nonce = nonce
+
+ if c.logger.IsInfo() {
+ c.logger.Info("core: rekey initialized", "nonce", c.barrierRekeyConfig.Nonce, "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold)
+ }
+ return nil
+}
+
+// RecoveryRekeyInit is used to initialize the rekey settings for the recovery key
+func (c *Core) RecoveryRekeyInit(config *SealConfig) error {
+ if config.StoredShares > 0 {
+ return fmt.Errorf("stored shares not supported by recovery key")
+ }
+
+ // Check if the seal configuration is valid
+ if err := config.Validate(); err != nil {
+ c.logger.Error("core: invalid recovery configuration", "error", err)
+ return fmt.Errorf("invalid recovery configuration: %v", err)
+ }
+
+ if !c.seal.RecoveryKeySupported() {
+ return fmt.Errorf("recovery keys not supported")
+ }
+
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return consts.ErrSealed
+ }
+ if c.standby {
+ return consts.ErrStandby
+ }
+
+ c.rekeyLock.Lock()
+ defer c.rekeyLock.Unlock()
+
+ // Prevent multiple concurrent re-keys
+ if c.recoveryRekeyConfig != nil {
+ return fmt.Errorf("rekey already in progress")
+ }
+
+ // Copy the configuration
+ c.recoveryRekeyConfig = config.Clone()
+
+ // Initialize the nonce
+ nonce, err := uuid.GenerateUUID()
+ if err != nil {
+ c.recoveryRekeyConfig = nil
+ return err
+ }
+ c.recoveryRekeyConfig.Nonce = nonce
+
+ if c.logger.IsInfo() {
+ c.logger.Info("core: rekey initialized", "nonce", c.recoveryRekeyConfig.Nonce, "shares", c.recoveryRekeyConfig.SecretShares, "threshold", c.recoveryRekeyConfig.SecretThreshold)
+ }
+ return nil
+}
+
+func (c *Core) RekeyUpdate(key []byte, nonce string, recovery bool) (*RekeyResult, error) {
+ if recovery {
+ return c.RecoveryRekeyUpdate(key, nonce)
+ }
+ return c.BarrierRekeyUpdate(key, nonce)
+}
+
+// BarrierRekeyUpdate is used to provide a new key part
+func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error) {
+ // Ensure we are already unsealed
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return nil, consts.ErrSealed
+ }
+ if c.standby {
+ return nil, consts.ErrStandby
+ }
+
+ // Verify the key length
+ min, max := c.barrier.KeyLength()
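+ // Shamir shares carry shamir.ShareOverhead extra bytes beyond the raw key length.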
+ max += shamir.ShareOverhead
+ if len(key) < min {
+ return nil, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
+ }
+ if len(key) > max {
+ return nil, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
+ }
+
+ c.rekeyLock.Lock()
+ defer c.rekeyLock.Unlock()
+
+ // Get the seal configuration
+ existingConfig, err := c.seal.BarrierConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ // Ensure the barrier is initialized
+ if existingConfig == nil {
+ return nil, ErrNotInit
+ }
+
+ // Ensure a rekey is in progress
+ if c.barrierRekeyConfig == nil {
+ return nil, fmt.Errorf("no rekey in progress")
+ }
+
+ if nonce != c.barrierRekeyConfig.Nonce {
+ return nil, fmt.Errorf("incorrect nonce supplied; nonce for this rekey operation is %s", c.barrierRekeyConfig.Nonce)
+ }
+
+ // Check if we already have this piece
+ for _, existing := range c.barrierRekeyProgress {
+ if bytes.Equal(existing, key) {
+ return nil, fmt.Errorf("given key has already been provided during this generation operation")
+ }
+ }
+
+ // Store this key
+ c.barrierRekeyProgress = append(c.barrierRekeyProgress, key)
+
+ // Check if we don't have enough keys to unlock
+ if len(c.barrierRekeyProgress) < existingConfig.SecretThreshold {
+ if c.logger.IsDebug() {
+ c.logger.Debug("core: cannot rekey yet, not enough keys", "keys", len(c.barrierRekeyProgress), "threshold", existingConfig.SecretThreshold)
+ }
+ return nil, nil
+ }
+
+ // Recover the master key
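+ // With a threshold of one the single share is the master key itself;
+ // otherwise the shares are combined via Shamir.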
+ var masterKey []byte
+ if existingConfig.SecretThreshold == 1 {
+ masterKey = c.barrierRekeyProgress[0]
+ c.barrierRekeyProgress = nil
+ } else {
+ masterKey, err = shamir.Combine(c.barrierRekeyProgress)
+ c.barrierRekeyProgress = nil
+ if err != nil {
+ return nil, fmt.Errorf("failed to compute master key: %v", err)
+ }
+ }
+
+ if err := c.barrier.VerifyMaster(masterKey); err != nil {
+ c.logger.Error("core: rekey aborted, master key verification failed", "error", err)
+ return nil, err
+ }
+
+ // Generate a new master key
+ newMasterKey, err := c.barrier.GenerateKey()
+ if err != nil {
+ c.logger.Error("core: failed to generate master key", "error", err)
+ return nil, fmt.Errorf("master key generation failed: %v", err)
+ }
+
+ // Return the master key if only a single key part is used
+ results := &RekeyResult{
+ Backup: c.barrierRekeyConfig.Backup,
+ }
+
+ if c.barrierRekeyConfig.SecretShares == 1 {
+ results.SecretShares = append(results.SecretShares, newMasterKey)
+ } else {
+ // Split the master key using the Shamir algorithm
+ shares, err := shamir.Split(newMasterKey, c.barrierRekeyConfig.SecretShares, c.barrierRekeyConfig.SecretThreshold)
+ if err != nil {
+ c.logger.Error("core: failed to generate shares", "error", err)
+ return nil, fmt.Errorf("failed to generate shares: %v", err)
+ }
+ results.SecretShares = shares
+ }
+
+ // If we are storing any shares, add them to the shares to store and remove
+ // from the returned keys
+ var keysToStore [][]byte
+ if c.barrierRekeyConfig.StoredShares > 0 {
+ for i := 0; i < c.barrierRekeyConfig.StoredShares; i++ {
+ keysToStore = append(keysToStore, results.SecretShares[0])
+ results.SecretShares = results.SecretShares[1:]
+ }
+ }
+
+ if len(c.barrierRekeyConfig.PGPKeys) > 0 {
+ hexEncodedShares := make([][]byte, len(results.SecretShares))
+ for i := range results.SecretShares {
+ hexEncodedShares[i] = []byte(hex.EncodeToString(results.SecretShares[i]))
+ }
+ results.PGPFingerprints, results.SecretShares, err = pgpkeys.EncryptShares(hexEncodedShares, c.barrierRekeyConfig.PGPKeys)
+ if err != nil {
+ return nil, err
+ }
+
+ if c.barrierRekeyConfig.Backup {
+ backupInfo := map[string][]string{}
+ for i := 0; i < len(results.PGPFingerprints); i++ {
+ encShare := bytes.NewBuffer(results.SecretShares[i])
+ if backupInfo[results.PGPFingerprints[i]] == nil {
+ backupInfo[results.PGPFingerprints[i]] = []string{hex.EncodeToString(encShare.Bytes())}
+ } else {
+ backupInfo[results.PGPFingerprints[i]] = append(backupInfo[results.PGPFingerprints[i]], hex.EncodeToString(encShare.Bytes()))
+ }
+ }
+
+ backupVals := &RekeyBackup{
+ Nonce: c.barrierRekeyConfig.Nonce,
+ Keys: backupInfo,
+ }
+ buf, err := json.Marshal(backupVals)
+ if err != nil {
+ c.logger.Error("core: failed to marshal unseal key backup", "error", err)
+ return nil, fmt.Errorf("failed to marshal unseal key backup: %v", err)
+ }
+ pe := &physical.Entry{
+ Key: coreBarrierUnsealKeysBackupPath,
+ Value: buf,
+ }
+ if err = c.physical.Put(pe); err != nil {
+ c.logger.Error("core: failed to save unseal key backup", "error", err)
+ return nil, fmt.Errorf("failed to save unseal key backup: %v", err)
+ }
+ }
+ }
+
+ if keysToStore != nil {
+ if err := c.seal.SetStoredKeys(keysToStore); err != nil {
+ c.logger.Error("core: failed to store keys", "error", err)
+ return nil, fmt.Errorf("failed to store keys: %v", err)
+ }
+ }
+
+ // Rekey the barrier
+ if err := c.barrier.Rekey(newMasterKey); err != nil {
+ c.logger.Error("core: failed to rekey barrier", "error", err)
+ return nil, fmt.Errorf("failed to rekey barrier: %v", err)
+ }
+ if c.logger.IsInfo() {
+ c.logger.Info("core: security barrier rekeyed", "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold)
+ }
+ if err := c.seal.SetBarrierConfig(c.barrierRekeyConfig); err != nil {
+ c.logger.Error("core: error saving rekey seal configuration", "error", err)
+ return nil, fmt.Errorf("failed to save rekey seal configuration: %v", err)
+ }
+
+ // Write to the canary path, which will force a synchronous truing during
+ // replication
+ if err := c.barrier.Put(&Entry{
+ Key: coreKeyringCanaryPath,
+ Value: []byte(c.barrierRekeyConfig.Nonce),
+ }); err != nil {
+ c.logger.Error("core: error saving keyring canary", "error", err)
+ return nil, fmt.Errorf("failed to save keyring canary: %v", err)
+ }
+
+ // Done!
+ c.barrierRekeyProgress = nil
+ c.barrierRekeyConfig = nil
+ return results, nil
+}
+
+// RecoveryRekeyUpdate is used to provide a new key part
+func (c *Core) RecoveryRekeyUpdate(key []byte, nonce string) (*RekeyResult, error) {
+ // Ensure we are already unsealed
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return nil, consts.ErrSealed
+ }
+ if c.standby {
+ return nil, consts.ErrStandby
+ }
+
+ // Verify the key length
+ min, max := c.barrier.KeyLength()
+ max += shamir.ShareOverhead
+ if len(key) < min {
+ return nil, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
+ }
+ if len(key) > max {
+ return nil, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
+ }
+
+ c.rekeyLock.Lock()
+ defer c.rekeyLock.Unlock()
+
+ // Get the seal configuration
+ barrierConfig, err := c.seal.BarrierConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ // Ensure the barrier is initialized
+ if barrierConfig == nil {
+ return nil, ErrNotInit
+ }
+
+ existingConfig, err := c.seal.RecoveryConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ // Ensure a rekey is in progress
+ if c.recoveryRekeyConfig == nil {
+ return nil, fmt.Errorf("no rekey in progress")
+ }
+
+ if nonce != c.recoveryRekeyConfig.Nonce {
+ return nil, fmt.Errorf("incorrect nonce supplied; nonce for this rekey operation is %s", c.recoveryRekeyConfig.Nonce)
+ }
+
+ // Check if we already have this piece
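+ // Note: unlike the barrier flow above, a duplicate recovery share is
+ // silently ignored rather than rejected with an error.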
+ for _, existing := range c.recoveryRekeyProgress {
+ if bytes.Equal(existing, key) {
+ return nil, nil
+ }
+ }
+
+ // Store this key
+ c.recoveryRekeyProgress = append(c.recoveryRekeyProgress, key)
+
+ // Check if we don't have enough keys to unlock
+ if len(c.recoveryRekeyProgress) < existingConfig.SecretThreshold {
+ if c.logger.IsDebug() {
+ c.logger.Debug("core: cannot rekey yet, not enough keys", "keys", len(c.recoveryRekeyProgress), "threshold", existingConfig.SecretThreshold)
+ }
+ return nil, nil
+ }
+
+ // Recover the master key
+ var masterKey []byte
+ if existingConfig.SecretThreshold == 1 {
+ masterKey = c.recoveryRekeyProgress[0]
+ c.recoveryRekeyProgress = nil
+ } else {
+ masterKey, err = shamir.Combine(c.recoveryRekeyProgress)
+ c.recoveryRekeyProgress = nil
+ if err != nil {
+ return nil, fmt.Errorf("failed to compute recovery key: %v", err)
+ }
+ }
+
+ // Verify the recovery key
+ if err := c.seal.VerifyRecoveryKey(masterKey); err != nil {
+ c.logger.Error("core: rekey aborted, recovery key verification failed", "error", err)
+ return nil, err
+ }
+
+ // Generate a new master key
+ newMasterKey, err := c.barrier.GenerateKey()
+ if err != nil {
+ c.logger.Error("core: failed to generate recovery key", "error", err)
+ return nil, fmt.Errorf("recovery key generation failed: %v", err)
+ }
+
+ // Return the master key if only a single key part is used
+ results := &RekeyResult{
+ Backup: c.recoveryRekeyConfig.Backup,
+ }
+
+ if c.recoveryRekeyConfig.SecretShares == 1 {
+ results.SecretShares = append(results.SecretShares, newMasterKey)
+ } else {
+ // Split the master key using the Shamir algorithm
+ shares, err := shamir.Split(newMasterKey, c.recoveryRekeyConfig.SecretShares, c.recoveryRekeyConfig.SecretThreshold)
+ if err != nil {
+ c.logger.Error("core: failed to generate shares", "error", err)
+ return nil, fmt.Errorf("failed to generate shares: %v", err)
+ }
+ results.SecretShares = shares
+ }
+
+ if len(c.recoveryRekeyConfig.PGPKeys) > 0 {
+ hexEncodedShares := make([][]byte, len(results.SecretShares))
+ for i := range results.SecretShares {
+ hexEncodedShares[i] = []byte(hex.EncodeToString(results.SecretShares[i]))
+ }
+ results.PGPFingerprints, results.SecretShares, err = pgpkeys.EncryptShares(hexEncodedShares, c.recoveryRekeyConfig.PGPKeys)
+ if err != nil {
+ return nil, err
+ }
+
+ if c.recoveryRekeyConfig.Backup {
+ backupInfo := map[string][]string{}
+ for i := 0; i < len(results.PGPFingerprints); i++ {
+ encShare := bytes.NewBuffer(results.SecretShares[i])
+ if backupInfo[results.PGPFingerprints[i]] == nil {
+ backupInfo[results.PGPFingerprints[i]] = []string{hex.EncodeToString(encShare.Bytes())}
+ } else {
+ backupInfo[results.PGPFingerprints[i]] = append(backupInfo[results.PGPFingerprints[i]], hex.EncodeToString(encShare.Bytes()))
+ }
+ }
+
+ backupVals := &RekeyBackup{
+ Nonce: c.recoveryRekeyConfig.Nonce,
+ Keys: backupInfo,
+ }
+ buf, err := json.Marshal(backupVals)
+ if err != nil {
+ c.logger.Error("core: failed to marshal recovery key backup", "error", err)
+ return nil, fmt.Errorf("failed to marshal recovery key backup: %v", err)
+ }
+ pe := &physical.Entry{
+ Key: coreRecoveryUnsealKeysBackupPath,
+ Value: buf,
+ }
+ if err = c.physical.Put(pe); err != nil {
+ c.logger.Error("core: failed to save unseal key backup", "error", err)
+ return nil, fmt.Errorf("failed to save unseal key backup: %v", err)
+ }
+ }
+ }
+
+ if err := c.seal.SetRecoveryKey(newMasterKey); err != nil {
+ c.logger.Error("core: failed to set recovery key", "error", err)
+ return nil, fmt.Errorf("failed to set recovery key: %v", err)
+ }
+
+ if err := c.seal.SetRecoveryConfig(c.recoveryRekeyConfig); err != nil {
+ c.logger.Error("core: error saving rekey seal configuration", "error", err)
+ return nil, fmt.Errorf("failed to save rekey seal configuration: %v", err)
+ }
+
+ // Write to the canary path, which will force a synchronous truing during
+ // replication
+ if err := c.barrier.Put(&Entry{
+ Key: coreKeyringCanaryPath,
+ Value: []byte(c.recoveryRekeyConfig.Nonce),
+ }); err != nil {
+ c.logger.Error("core: error saving keyring canary", "error", err)
+ return nil, fmt.Errorf("failed to save keyring canary: %v", err)
+ }
+
+ // Done!
+ c.recoveryRekeyProgress = nil
+ c.recoveryRekeyConfig = nil
+ return results, nil
+}
+
+// RekeyCancel is used to cancel an in-progress rekey
+func (c *Core) RekeyCancel(recovery bool) error {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return consts.ErrSealed
+ }
+ if c.standby {
+ return consts.ErrStandby
+ }
+
+ c.rekeyLock.Lock()
+ defer c.rekeyLock.Unlock()
+
+ // Clear any progress or config
+ if recovery {
+ c.recoveryRekeyConfig = nil
+ c.recoveryRekeyProgress = nil
+ } else {
+ c.barrierRekeyConfig = nil
+ c.barrierRekeyProgress = nil
+ }
+ return nil
+}
+
+// RekeyRetrieveBackup is used to retrieve any backed-up PGP-encrypted unseal
+// keys
+func (c *Core) RekeyRetrieveBackup(recovery bool) (*RekeyBackup, error) {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return nil, consts.ErrSealed
+ }
+ if c.standby {
+ return nil, consts.ErrStandby
+ }
+
+ c.rekeyLock.RLock()
+ defer c.rekeyLock.RUnlock()
+
+ var entry *physical.Entry
+ var err error
+ if recovery {
+ entry, err = c.physical.Get(coreRecoveryUnsealKeysBackupPath)
+ } else {
+ entry, err = c.physical.Get(coreBarrierUnsealKeysBackupPath)
+ }
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ ret := &RekeyBackup{}
+ err = jsonutil.DecodeJSON(entry.Value, ret)
+ if err != nil {
+ return nil, err
+ }
+
+ return ret, nil
+}
+
+// RekeyDeleteBackup is used to delete any backed-up PGP-encrypted unseal keys
+func (c *Core) RekeyDeleteBackup(recovery bool) error {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return consts.ErrSealed
+ }
+ if c.standby {
+ return consts.ErrStandby
+ }
+
+ c.rekeyLock.Lock()
+ defer c.rekeyLock.Unlock()
+
+ if recovery {
+ return c.physical.Delete(coreRecoveryUnsealKeysBackupPath)
+ }
+ return c.physical.Delete(coreBarrierUnsealKeysBackupPath)
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/rekey_test.go b/vendor/github.com/hashicorp/vault/vault/rekey_test.go
new file mode 100644
index 0000000..c463325
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/rekey_test.go
@@ -0,0 +1,483 @@
+package vault
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
+)
+
+func TestCore_Rekey_Lifecycle(t *testing.T) {
+ bc, rc := TestSealDefConfigs()
+ bc.SecretShares = 1
+ bc.SecretThreshold = 1
+ bc.StoredShares = 0
+ c, masterKeys, recoveryKeys, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
+ if len(masterKeys) != 1 {
+ t.Fatalf("expected %d keys, got %d", bc.SecretShares-bc.StoredShares, len(masterKeys))
+ }
+ testCore_Rekey_Lifecycle_Common(t, c, masterKeys, false)
+
+ bc, rc = TestSealDefConfigs()
+ c, masterKeys, recoveryKeys, _ = TestCoreUnsealedWithConfigs(t, bc, rc)
+ if len(masterKeys) != 3 {
+ t.Fatalf("expected %d keys, got %d", bc.SecretShares-bc.StoredShares, len(masterKeys))
+ }
+ testCore_Rekey_Lifecycle_Common(t, c, masterKeys, false)
+ testCore_Rekey_Lifecycle_Common(t, c, recoveryKeys, true)
+}
+
+func testCore_Rekey_Lifecycle_Common(t *testing.T, c *Core, masterKeys [][]byte, recovery bool) {
+ // Verify update not allowed
+ if _, err := c.RekeyUpdate(masterKeys[0], "", recovery); err == nil {
+ t.Fatalf("no rekey should be in progress")
+ }
+
+ // Should be no progress
+ num, err := c.RekeyProgress(recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if num != 0 {
+ t.Fatalf("bad: %d", num)
+ }
+
+ // Should be no config
+ conf, err := c.RekeyConfig(recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if conf != nil {
+ t.Fatalf("bad: %v", conf)
+ }
+
+ // Cancel should be idempotent
+ err = c.RekeyCancel(false)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Start a rekey
+ newConf := &SealConfig{
+ SecretThreshold: 3,
+ SecretShares: 5,
+ }
+ err = c.RekeyInit(newConf, recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should get config
+ conf, err = c.RekeyConfig(recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ newConf.Nonce = conf.Nonce
+ if !reflect.DeepEqual(conf, newConf) {
+ t.Fatalf("bad: %v", conf)
+ }
+
+ // Cancel should be clear
+ err = c.RekeyCancel(recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be no config
+ conf, err = c.RekeyConfig(recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if conf != nil {
+ t.Fatalf("bad: %v", conf)
+ }
+}
+
+func TestCore_Rekey_Init(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+ testCore_Rekey_Init_Common(t, c, false)
+
+ bc, rc := TestSealDefConfigs()
+ c, _, _, _ = TestCoreUnsealedWithConfigs(t, bc, rc)
+ testCore_Rekey_Init_Common(t, c, false)
+ testCore_Rekey_Init_Common(t, c, true)
+}
+
+func testCore_Rekey_Init_Common(t *testing.T, c *Core, recovery bool) {
+ // Try an invalid config
+ badConf := &SealConfig{
+ SecretThreshold: 5,
+ SecretShares: 1,
+ }
+ err := c.RekeyInit(badConf, recovery)
+ if err == nil {
+ t.Fatalf("should fail")
+ }
+
+ // Start a rekey
+ newConf := &SealConfig{
+ SecretThreshold: 3,
+ SecretShares: 5,
+ }
+ err = c.RekeyInit(newConf, recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Second should fail
+ err = c.RekeyInit(newConf, recovery)
+ if err == nil {
+ t.Fatalf("should fail")
+ }
+}
+
+func TestCore_Rekey_Update(t *testing.T) {
+ bc, rc := TestSealDefConfigs()
+ bc.SecretShares = 1
+ bc.SecretThreshold = 1
+ bc.StoredShares = 0
+ c, masterKeys, _, root := TestCoreUnsealedWithConfigs(t, bc, rc)
+ testCore_Rekey_Update_Common(t, c, masterKeys, root, false)
+
+ bc, rc = TestSealDefConfigs()
+ bc.StoredShares = 0
+ c, masterKeys, recoveryKeys, root := TestCoreUnsealedWithConfigs(t, bc, rc)
+ testCore_Rekey_Update_Common(t, c, masterKeys, root, false)
+ testCore_Rekey_Update_Common(t, c, recoveryKeys, root, true)
+}
+
+func testCore_Rekey_Update_Common(t *testing.T, c *Core, keys [][]byte, root string, recovery bool) {
+ // Start a rekey
+ var expType string
+ if recovery {
+ expType = c.seal.RecoveryType()
+ } else {
+ expType = c.seal.BarrierType()
+ }
+
+ newConf := &SealConfig{
+ Type: expType,
+ SecretThreshold: 3,
+ SecretShares: 5,
+ }
+ err := c.RekeyInit(newConf, recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Fetch new config with generated nonce
+ rkconf, err := c.RekeyConfig(recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if rkconf == nil {
+ t.Fatalf("bad: no rekey config received")
+ }
+
+ // Provide the master
+ var result *RekeyResult
+ for _, key := range keys {
+ result, err = c.RekeyUpdate(key, rkconf.Nonce, recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if result != nil {
+ break
+ }
+ }
+ if result == nil || len(result.SecretShares) != newConf.SecretShares {
+ t.Fatalf("Bad: %#v", result)
+ }
+
+ // Should be no progress
+ num, err := c.RekeyProgress(recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if num != 0 {
+ t.Fatalf("bad: %d", num)
+ }
+
+ // Should be no config
+ conf, err := c.RekeyConfig(recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if conf != nil {
+ t.Fatalf("bad: %v", conf)
+ }
+
+ // SealConfig should update
+ var sealConf *SealConfig
+ if recovery {
+ sealConf, err = c.seal.RecoveryConfig()
+ } else {
+ sealConf, err = c.seal.BarrierConfig()
+ }
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if sealConf == nil {
+ t.Fatal("seal configuration is nil")
+ }
+
+ newConf.Nonce = rkconf.Nonce
+ if !reflect.DeepEqual(sealConf, newConf) {
+ t.Fatalf("\nexpected: %#v\nactual: %#v\nexpType: %s\nrecovery: %t", newConf, sealConf, expType, recovery)
+ }
+
+ // Attempt unseal if this was not recovery mode
+ if !recovery {
+ err = c.Seal(root)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for i := 0; i < 3; i++ {
+ _, err = TestCoreUnseal(c, TestKeyCopy(result.SecretShares[i]))
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+ if sealed, _ := c.Sealed(); sealed {
+ t.Fatalf("should be unsealed")
+ }
+ }
+
+ // Start another rekey, this time we require a quorum!
+ newConf = &SealConfig{
+ Type: expType,
+ SecretThreshold: 1,
+ SecretShares: 1,
+ }
+ err = c.RekeyInit(newConf, recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Fetch new config with generated nonce
+ rkconf, err = c.RekeyConfig(recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if rkconf == nil {
+ t.Fatalf("bad: no rekey config received")
+ }
+
+ // Provide the master key parts
+ oldResult := result
+ for i := 0; i < 3; i++ {
+ result, err = c.RekeyUpdate(TestKeyCopy(oldResult.SecretShares[i]), rkconf.Nonce, recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be progress
+ num, err := c.RekeyProgress(recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if (i == 2 && num != 0) || (i != 2 && num != i+1) {
+ t.Fatalf("bad: %d", num)
+ }
+ }
+ if result == nil || len(result.SecretShares) != 1 {
+ t.Fatalf("Bad: %#v", result)
+ }
+
+ // Attempt unseal if this was not recovery mode
+ if !recovery {
+ err = c.Seal(root)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ unseal, err := TestCoreUnseal(c, result.SecretShares[0])
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !unseal {
+ t.Fatalf("should be unsealed")
+ }
+ }
+
+ // SealConfig should update
+ if recovery {
+ sealConf, err = c.seal.RecoveryConfig()
+ } else {
+ sealConf, err = c.seal.BarrierConfig()
+ }
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ newConf.Nonce = rkconf.Nonce
+ if !reflect.DeepEqual(sealConf, newConf) {
+ t.Fatalf("bad: %#v", sealConf)
+ }
+}
+
+func TestCore_Rekey_Invalid(t *testing.T) {
+ bc, rc := TestSealDefConfigs()
+ bc.StoredShares = 0
+ bc.SecretShares = 1
+ bc.SecretThreshold = 1
+ rc.SecretShares = 1
+ rc.SecretThreshold = 1
+ c, masterKeys, recoveryKeys, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
+ testCore_Rekey_Invalid_Common(t, c, masterKeys, false)
+ testCore_Rekey_Invalid_Common(t, c, recoveryKeys, true)
+}
+
+func testCore_Rekey_Invalid_Common(t *testing.T, c *Core, keys [][]byte, recovery bool) {
+ // Start a rekey
+ newConf := &SealConfig{
+ SecretThreshold: 3,
+ SecretShares: 5,
+ }
+ err := c.RekeyInit(newConf, recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Fetch new config with generated nonce
+ rkconf, err := c.RekeyConfig(recovery)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if rkconf == nil {
+ t.Fatalf("bad: no rekey config received")
+ }
+
+ // Provide the nonce (invalid)
+ _, err = c.RekeyUpdate(keys[0], "abcd", recovery)
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ // Provide the key (invalid)
+ key := keys[0]
+ oldkeystr := fmt.Sprintf("%#v", key)
+ key[0]++
+ newkeystr := fmt.Sprintf("%#v", key)
+ ret, err := c.RekeyUpdate(key, rkconf.Nonce, recovery)
+ if err == nil {
+ t.Fatalf("expected error, ret is %#v\noldkeystr: %s\nnewkeystr: %s", *ret, oldkeystr, newkeystr)
+ }
+}
+
+func TestCore_Standby_Rekey(t *testing.T) {
+ // Create the first core and initialize it
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ inm := physical.NewInmem(logger)
+ inmha := physical.NewInmemHA(logger)
+ redirectOriginal := "http://127.0.0.1:8200"
+ core, err := NewCore(&CoreConfig{
+ Physical: inm,
+ HAPhysical: inmha,
+ RedirectAddr: redirectOriginal,
+ DisableMlock: true,
+ DisableCache: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ keys, root := TestCoreInit(t, core)
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Wait for core to become active
+ TestWaitActive(t, core)
+
+ // Create a second core, attached to same in-memory store
+ redirectOriginal2 := "http://127.0.0.1:8500"
+ core2, err := NewCore(&CoreConfig{
+ Physical: inm,
+ HAPhysical: inmha,
+ RedirectAddr: redirectOriginal2,
+ DisableMlock: true,
+ DisableCache: true,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Rekey the master key
+ newConf := &SealConfig{
+ SecretShares: 1,
+ SecretThreshold: 1,
+ }
+ err = core.RekeyInit(newConf, false)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ // Fetch new config with generated nonce
+ rkconf, err := core.RekeyConfig(false)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if rkconf == nil {
+ t.Fatalf("bad: no rekey config received")
+ }
+ var rekeyResult *RekeyResult
+ for _, key := range keys {
+ rekeyResult, err = core.RekeyUpdate(key, rkconf.Nonce, false)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+ if rekeyResult == nil {
+ t.Fatalf("rekey failed")
+ }
+
+ // Seal the first core, should step down
+ err = core.Seal(root)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Wait for core2 to become active
+ TestWaitActive(t, core2)
+
+ // Rekey the master key again
+ err = core2.RekeyInit(newConf, false)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ // Fetch new config with generated nonce
+ rkconf, err = core2.RekeyConfig(false)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if rkconf == nil {
+ t.Fatalf("bad: no rekey config received")
+ }
+ var rekeyResult2 *RekeyResult
+ for _, key := range rekeyResult.SecretShares {
+ rekeyResult2, err = core2.RekeyUpdate(key, rkconf.Nonce, false)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+ if rekeyResult2 == nil {
+ t.Fatalf("rekey failed")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding.go
new file mode 100644
index 0000000..62cbf44
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding.go
@@ -0,0 +1,427 @@
+package vault
+
+import (
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/hashicorp/vault/helper/forwarding"
+ "golang.org/x/net/context"
+ "golang.org/x/net/http2"
+ "google.golang.org/grpc"
+)
+
+const (
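+ // clusterListenerAcceptDeadline bounds each Accept call so the listener
+ // loop can periodically check the shutdown flag.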
+ clusterListenerAcceptDeadline = 500 * time.Millisecond
+)
+
+// Starts the listeners and servers necessary to handle forwarded requests
+func (c *Core) startForwarding() error {
+ c.logger.Trace("core: cluster listener setup function")
+ defer c.logger.Trace("core: leaving cluster listener setup function")
+
+ // Clean up in case we have transitioned from a client to a server
+ c.requestForwardingConnectionLock.Lock()
+ c.clearForwardingClients()
+ c.requestForwardingConnectionLock.Unlock()
+
+ // Resolve locally to avoid races
+ ha := c.ha != nil
+
+ // Get our base handler (for our RPC server) and our wrapped handler (for
+ // straight HTTP/2 forwarding)
+ baseHandler, wrappedHandler := c.clusterHandlerSetupFunc()
+
+ // Get our TLS config
+ tlsConfig, err := c.ClusterTLSConfig()
+ if err != nil {
+ c.logger.Error("core: failed to get tls configuration when starting forwarding", "error", err)
+ return err
+ }
+
+ // Advertise both supported ALPN protocols: "h2" for straight HTTP/2
+ // forwarding and "req_fw_sb-act_v1" for the gRPC request-forwarding server
+ tlsConfig.NextProtos = []string{"h2", "req_fw_sb-act_v1"}
+
+ // Create our RPC server and register the request handler server
+ c.clusterParamsLock.Lock()
+
+ if c.rpcServer != nil {
+ c.logger.Warn("core: forwarding rpc server already running")
+ return nil
+ }
+
+ c.rpcServer = grpc.NewServer()
+
+ if ha {
+ RegisterRequestForwardingServer(c.rpcServer, &forwardedRequestRPCServer{
+ core: c,
+ handler: baseHandler,
+ })
+ }
+ c.clusterParamsLock.Unlock()
+
+ // Create the HTTP/2 server that will be shared by both RPC and regular
+ // duties. Doing it this way instead of listening via the server and gRPC
+ // allows us to re-use the same port via ALPN. We can just tell the server
+ // to serve a given conn and which handler to use.
+ fws := &http2.Server{}
+
+ // Shutdown coordination logic
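+ // shutdown is read atomically by each listener loop; shutdownWg lets the
+ // shutdown goroutine wait for those loops to exit.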
+ var shutdown uint32
+ shutdownWg := &sync.WaitGroup{}
+
+ for _, addr := range c.clusterListenerAddrs {
+ shutdownWg.Add(1)
+
+ // Force a local resolution to avoid data races
+ laddr := addr
+
+ // Start our listening loop
+ go func() {
+ defer shutdownWg.Done()
+
+ if c.logger.IsInfo() {
+ c.logger.Info("core/startClusterListener: starting listener", "listener_address", laddr)
+ }
+
+ // Create a TCP listener. We do this separately and specifically
+ // with TCP so that we can set deadlines.
+ tcpLn, err := net.ListenTCP("tcp", laddr)
+ if err != nil {
+ c.logger.Error("core/startClusterListener: error starting listener", "error", err)
+ return
+ }
+
+ // Wrap the listener with TLS
+ tlsLn := tls.NewListener(tcpLn, tlsConfig)
+ defer tlsLn.Close()
+
+ if c.logger.IsInfo() {
+ c.logger.Info("core/startClusterListener: serving cluster requests", "cluster_listen_address", tlsLn.Addr())
+ }
+
+ for {
+ if atomic.LoadUint32(&shutdown) > 0 {
+ return
+ }
+
+ // Set the deadline for the accept call. If it passes we'll get
+ // an error, causing us to check the condition at the top
+ // again.
+ tcpLn.SetDeadline(time.Now().Add(clusterListenerAcceptDeadline))
+
+ // Accept the connection
+ conn, err := tlsLn.Accept()
+ if conn != nil {
+ // Always defer although it may be closed ahead of time
+ defer conn.Close()
+ }
+ if err != nil {
+ continue
+ }
+
+ // Type assert to TLS connection and handshake to populate the
+ // connection state
+ tlsConn := conn.(*tls.Conn)
+ err = tlsConn.Handshake()
+ if err != nil {
+ if c.logger.IsDebug() {
+ c.logger.Debug("core: error handshaking cluster connection", "error", err)
+ }
+ if conn != nil {
+ conn.Close()
+ }
+ continue
+ }
+
+ switch tlsConn.ConnectionState().NegotiatedProtocol {
+ case "h2":
+ if !ha {
+ conn.Close()
+ continue
+ }
+
+ c.logger.Trace("core: got h2 connection")
+ go fws.ServeConn(conn, &http2.ServeConnOpts{
+ Handler: wrappedHandler,
+ })
+
+ case "req_fw_sb-act_v1":
+ if !ha {
+ conn.Close()
+ continue
+ }
+
+ c.logger.Trace("core: got req_fw_sb-act_v1 connection")
+ go fws.ServeConn(conn, &http2.ServeConnOpts{
+ Handler: c.rpcServer,
+ })
+
+ default:
+ c.logger.Debug("core: unknown negotiated protocol on cluster port")
+ conn.Close()
+ continue
+ }
+ }
+ }()
+ }
+
+ // This is in its own goroutine so that we don't block the main thread, and
+ // thus we use atomic and channels to coordinate
+ // However, because you can't query the status of a channel, we set a bool
+ // here while we have the state lock to know whether to actually send a
+ // shutdown (e.g. whether the channel will block). See issue #2083.
+ c.clusterListenersRunning = true
+ go func() {
+ // If we get told to shut down...
+ <-c.clusterListenerShutdownCh
+
+ // Stop the RPC server
+ c.logger.Info("core: shutting down forwarding rpc listeners")
+ c.clusterParamsLock.Lock()
+ c.rpcServer.Stop()
+ c.rpcServer = nil
+ c.clusterParamsLock.Unlock()
+ c.logger.Info("core: forwarding rpc listeners stopped")
+
+ // Set the shutdown flag. This will cause the listeners to shut down
+ // within the deadline in clusterListenerAcceptDeadline
+ atomic.StoreUint32(&shutdown, 1)
+
+ // Wait for them all to shut down
+ shutdownWg.Wait()
+ c.logger.Info("core: rpc listeners successfully shut down")
+
+ // Tell the main thread that shutdown is done.
+ c.clusterListenerShutdownSuccessCh <- struct{}{}
+ }()
+
+ return nil
+}
+
+// refreshRequestForwardingConnection ensures that the client/transport are
+// alive and that the current active address value matches the most
+// recently-known address.
+func (c *Core) refreshRequestForwardingConnection(clusterAddr string) error {
+ c.logger.Trace("core: refreshing forwarding connection")
+ defer c.logger.Trace("core: done refreshing forwarding connection")
+
+ c.requestForwardingConnectionLock.Lock()
+ defer c.requestForwardingConnectionLock.Unlock()
+
+ // Clean things up first
+ c.clearForwardingClients()
+
+ // If we don't have anything to connect to, just return
+ if clusterAddr == "" {
+ return nil
+ }
+
+ clusterURL, err := url.Parse(clusterAddr)
+ if err != nil {
+ c.logger.Error("core: error parsing cluster address attempting to refresh forwarding connection", "error", err)
+ return err
+ }
+
+ switch os.Getenv("VAULT_USE_GRPC_REQUEST_FORWARDING") {
+ case "":
+ // Set up normal HTTP forwarding handling
+ tlsConfig, err := c.ClusterTLSConfig()
+ if err != nil {
+ c.logger.Error("core: error fetching cluster tls configuration when trying to create connection", "error", err)
+ return err
+ }
+ tp := &http2.Transport{
+ TLSClientConfig: tlsConfig,
+ }
+ c.requestForwardingConnection = &activeConnection{
+ transport: tp,
+ clusterAddr: clusterAddr,
+ }
+
+ default:
+ // Set up grpc forwarding handling
+ // It's not really insecure, but we have to dial manually to get the
+ // ALPN header right. It's just "insecure" because GRPC isn't managing
+ // the TLS state.
+
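+ // Retain the cancel func so clearForwardingClients can tear down the
+ // dial context later.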
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ c.rpcClientConnCancelFunc = cancelFunc
+ c.rpcClientConn, err = grpc.DialContext(ctx, clusterURL.Host, grpc.WithDialer(c.getGRPCDialer("req_fw_sb-act_v1", "", nil)), grpc.WithInsecure())
+ if err != nil {
+ c.logger.Error("core: err setting up forwarding rpc client", "error", err)
+ return err
+ }
+ c.rpcForwardingClient = NewRequestForwardingClient(c.rpcClientConn)
+ }
+
+ return nil
+}
+
+func (c *Core) clearForwardingClients() {
+ c.logger.Trace("core: clearing forwarding clients")
+ defer c.logger.Trace("core: done clearing forwarding clients")
+
+ if c.requestForwardingConnection != nil {
+ c.requestForwardingConnection.transport.CloseIdleConnections()
+ c.requestForwardingConnection = nil
+ }
+
+ if c.rpcClientConnCancelFunc != nil {
+ c.rpcClientConnCancelFunc()
+ c.rpcClientConnCancelFunc = nil
+ }
+ if c.rpcClientConn != nil {
+ c.rpcClientConn.Close()
+ c.rpcClientConn = nil
+ }
+ c.rpcForwardingClient = nil
+}
+
+// ForwardRequest forwards a given request to the active node and returns the
+// response.
+func (c *Core) ForwardRequest(req *http.Request) (int, http.Header, []byte, error) {
+ c.requestForwardingConnectionLock.RLock()
+ defer c.requestForwardingConnectionLock.RUnlock()
+
+ switch os.Getenv("VAULT_USE_GRPC_REQUEST_FORWARDING") {
+ case "":
+ if c.requestForwardingConnection == nil {
+ return 0, nil, nil, ErrCannotForward
+ }
+
+ if c.requestForwardingConnection.clusterAddr == "" {
+ return 0, nil, nil, ErrCannotForward
+ }
+
+ freq, err := forwarding.GenerateForwardedHTTPRequest(req, c.requestForwardingConnection.clusterAddr+"/cluster/local/forwarded-request")
+ if err != nil {
+ c.logger.Error("core/ForwardRequest: error creating forwarded request", "error", err)
+ return 0, nil, nil, fmt.Errorf("error creating forwarding request")
+ }
+
+ //resp, err := c.requestForwardingConnection.Do(freq)
+ resp, err := c.requestForwardingConnection.transport.RoundTrip(freq)
+ if err != nil {
+ return 0, nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ // Read the body into a buffer so we can write it back out to the
+ // original requestor
+ buf := bytes.NewBuffer(nil)
+ _, err = buf.ReadFrom(resp.Body)
+ if err != nil {
+ return 0, nil, nil, err
+ }
+ return resp.StatusCode, resp.Header, buf.Bytes(), nil
+
+ default:
+ if c.rpcForwardingClient == nil {
+ return 0, nil, nil, ErrCannotForward
+ }
+
+ freq, err := forwarding.GenerateForwardedRequest(req)
+ if err != nil {
+ c.logger.Error("core/ForwardRequest: error creating forwarding RPC request", "error", err)
+ return 0, nil, nil, fmt.Errorf("error creating forwarding RPC request")
+ }
+ if freq == nil {
+ c.logger.Error("core/ForwardRequest: got nil forwarding RPC request")
+ return 0, nil, nil, fmt.Errorf("got nil forwarding RPC request")
+ }
+ resp, err := c.rpcForwardingClient.ForwardRequest(context.Background(), freq, grpc.FailFast(true))
+ if err != nil {
+ c.logger.Error("core/ForwardRequest: error during forwarded RPC request", "error", err)
+ return 0, nil, nil, fmt.Errorf("error during forwarding RPC request")
+ }
+
+ var header http.Header
+ if resp.HeaderEntries != nil {
+ header = make(http.Header)
+ for k, v := range resp.HeaderEntries {
+ for _, j := range v.Values {
+ header.Add(k, j)
+ }
+ }
+ }
+
+ return int(resp.StatusCode), header, resp.Body, nil
+ }
+}
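+
+// For illustration (hostname is hypothetical): with
+// VAULT_USE_GRPC_REQUEST_FORWARDING unset, a standby forwards the request
+// over the HTTP/2 transport to
+//
+//	https://active.vault.example:8201/cluster/local/forwarded-request
+//
+// while any non-empty value routes it through the gRPC client dialed in
+// refreshRequestForwardingConnection instead.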
+
+// getGRPCDialer is used to return a dialer that has the correct TLS
+// configuration. Otherwise gRPC tries to be helpful and stomps all over our
+// NextProtos.
+func (c *Core) getGRPCDialer(alpnProto, serverName string, caCert *x509.Certificate) func(string, time.Duration) (net.Conn, error) {
+ return func(addr string, timeout time.Duration) (net.Conn, error) {
+ tlsConfig, err := c.ClusterTLSConfig()
+ if err != nil {
+ c.logger.Error("core: failed to get tls configuration", "error", err)
+ return nil, err
+ }
+ if serverName != "" {
+ tlsConfig.ServerName = serverName
+ }
+ if caCert != nil {
+ pool := x509.NewCertPool()
+ pool.AddCert(caCert)
+ tlsConfig.RootCAs = pool
+ tlsConfig.ClientCAs = pool
+ }
+ c.logger.Trace("core: creating rpc dialer", "host", tlsConfig.ServerName)
+
+ tlsConfig.NextProtos = []string{alpnProto}
+ dialer := &net.Dialer{
+ Timeout: timeout,
+ }
+ return tls.DialWithDialer(dialer, "tcp", addr, tlsConfig)
+ }
+}
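+
+// Usage sketch (address is hypothetical): gRPC's own TLS credentials would
+// overwrite NextProtos, so the connection is dialed "insecure" and TLS plus
+// ALPN are handled by this dialer, letting the cluster listener demultiplex
+// on NegotiatedProtocol:
+//
+//	conn, err := grpc.DialContext(ctx, "10.0.0.1:8201",
+//		grpc.WithDialer(c.getGRPCDialer("req_fw_sb-act_v1", "", nil)),
+//		grpc.WithInsecure()) // TLS is done by the dialer, not by gRPC
+//	if err != nil {
+//		return err
+//	}
+//	defer conn.Close()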
+
+type forwardedRequestRPCServer struct {
+ core *Core
+ handler http.Handler
+}
+
+func (s *forwardedRequestRPCServer) ForwardRequest(ctx context.Context, freq *forwarding.Request) (*forwarding.Response, error) {
+ //s.core.logger.Trace("forwarding: serving rpc forwarded request")
+
+ // Parse an http.Request out of it
+ req, err := forwarding.ParseForwardedRequest(freq)
+ if err != nil {
+ return nil, err
+ }
+
+	// A bare-bones response writer that doesn't follow normal semantics; it
+	// just lets you record a status code (last write wins) and a body. But it
+	// meets the http.ResponseWriter interface requirements.
+ w := forwarding.NewRPCResponseWriter()
+
+ s.handler.ServeHTTP(w, req)
+
+ resp := &forwarding.Response{
+ StatusCode: uint32(w.StatusCode()),
+ Body: w.Body().Bytes(),
+ }
+
+ header := w.Header()
+ if header != nil {
+ resp.HeaderEntries = make(map[string]*forwarding.HeaderEntry, len(header))
+ for k, v := range header {
+ resp.HeaderEntries[k] = &forwarding.HeaderEntry{
+ Values: v,
+ }
+ }
+ }
+
+ return resp, nil
+}
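+
+// A plausible shape for the writer returned by forwarding.NewRPCResponseWriter
+// (an assumption about the helper package, sketched only to show why
+// ServeHTTP can be pointed at it):
+//
+//	type rpcResponseWriter struct {
+//		header http.Header
+//		body   bytes.Buffer
+//		status int
+//	}
+//
+//	func (w *rpcResponseWriter) Header() http.Header         { return w.header }
+//	func (w *rpcResponseWriter) Write(b []byte) (int, error) { return w.body.Write(b) }
+//	func (w *rpcResponseWriter) WriteHeader(code int)        { w.status = code } // last write wins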
diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go
new file mode 100644
index 0000000..cae684d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go
@@ -0,0 +1,122 @@
+// Code generated by protoc-gen-go.
+// source: request_forwarding_service.proto
+// DO NOT EDIT!
+
+/*
+Package vault is a generated protocol buffer package.
+
+It is generated from these files:
+ request_forwarding_service.proto
+
+It has these top-level messages:
+*/
+package vault
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import forwarding "github.com/hashicorp/vault/helper/forwarding"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for RequestForwarding service
+
+type RequestForwardingClient interface {
+ ForwardRequest(ctx context.Context, in *forwarding.Request, opts ...grpc.CallOption) (*forwarding.Response, error)
+}
+
+type requestForwardingClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewRequestForwardingClient(cc *grpc.ClientConn) RequestForwardingClient {
+ return &requestForwardingClient{cc}
+}
+
+func (c *requestForwardingClient) ForwardRequest(ctx context.Context, in *forwarding.Request, opts ...grpc.CallOption) (*forwarding.Response, error) {
+ out := new(forwarding.Response)
+ err := grpc.Invoke(ctx, "/vault.RequestForwarding/ForwardRequest", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for RequestForwarding service
+
+type RequestForwardingServer interface {
+ ForwardRequest(context.Context, *forwarding.Request) (*forwarding.Response, error)
+}
+
+func RegisterRequestForwardingServer(s *grpc.Server, srv RequestForwardingServer) {
+ s.RegisterService(&_RequestForwarding_serviceDesc, srv)
+}
+
+func _RequestForwarding_ForwardRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(forwarding.Request)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(RequestForwardingServer).ForwardRequest(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vault.RequestForwarding/ForwardRequest",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(RequestForwardingServer).ForwardRequest(ctx, req.(*forwarding.Request))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _RequestForwarding_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "vault.RequestForwarding",
+ HandlerType: (*RequestForwardingServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ForwardRequest",
+ Handler: _RequestForwarding_ForwardRequest_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "request_forwarding_service.proto",
+}
+
+func init() { proto.RegisterFile("request_forwarding_service.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 151 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x28, 0x4a, 0x2d, 0x2c,
+ 0x4d, 0x2d, 0x2e, 0x89, 0x4f, 0xcb, 0x2f, 0x2a, 0x4f, 0x2c, 0x4a, 0xc9, 0xcc, 0x4b, 0x8f, 0x2f,
+ 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4b,
+ 0x2c, 0xcd, 0x29, 0x91, 0xb2, 0x48, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5,
+ 0xcf, 0x48, 0x2c, 0xce, 0xc8, 0x4c, 0xce, 0x2f, 0x2a, 0xd0, 0x07, 0xcb, 0xe9, 0x67, 0xa4, 0xe6,
+ 0x14, 0xa4, 0x16, 0xe9, 0x23, 0x8c, 0xd0, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0x86, 0x18, 0x60, 0x14,
+ 0xc4, 0x25, 0x18, 0x04, 0xb1, 0xc4, 0x0d, 0xae, 0x40, 0xc8, 0x96, 0x8b, 0x0f, 0xca, 0x83, 0xca,
+ 0x09, 0x09, 0xeb, 0x21, 0xf4, 0xeb, 0x41, 0x05, 0xa5, 0x44, 0x50, 0x05, 0x8b, 0x0b, 0xf2, 0xf3,
+ 0x8a, 0x53, 0x95, 0x18, 0x92, 0xd8, 0xc0, 0x46, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x81,
+ 0xce, 0x3f, 0x7f, 0xbf, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto
new file mode 100644
index 0000000..4ab32c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto
@@ -0,0 +1,9 @@
+syntax = "proto3";
+
+import "github.com/hashicorp/vault/helper/forwarding/types.proto";
+
+package vault;
+
+service RequestForwarding {
+ rpc ForwardRequest(forwarding.Request) returns (forwarding.Response) {}
+}
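+
+// The sibling request_forwarding_service.pb.go is regenerated from this
+// definition. The exact invocation is an assumption, but with the
+// protoc-gen-go of this era it would look roughly like:
+//
+//   protoc -I . -I $GOPATH/src \
+//     --go_out=plugins=grpc:. request_forwarding_service.proto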
diff --git a/vendor/github.com/hashicorp/vault/vault/request_handling.go b/vendor/github.com/hashicorp/vault/vault/request_handling.go
new file mode 100644
index 0000000..ad37b5a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/request_handling.go
@@ -0,0 +1,451 @@
+package vault
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/armon/go-metrics"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+// HandleRequest is used to handle a new incoming request
+func (c *Core) HandleRequest(req *logical.Request) (resp *logical.Response, err error) {
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return nil, consts.ErrSealed
+ }
+ if c.standby {
+ return nil, consts.ErrStandby
+ }
+
+ // Allowing writing to a path ending in / makes it extremely difficult to
+ // understand user intent for the filesystem-like backends (generic,
+ // cubbyhole) -- did they want a key named foo/ or did they want to write
+ // to a directory foo/ with no (or forgotten) key, or...? It also affects
+ // lookup, because paths ending in / are considered prefixes by some
+ // backends. Basically, it's all just terrible, so don't allow it.
+ if strings.HasSuffix(req.Path, "/") &&
+ (req.Operation == logical.UpdateOperation ||
+ req.Operation == logical.CreateOperation) {
+ return logical.ErrorResponse("cannot write to a path ending in '/'"), nil
+ }
+
+ var auth *logical.Auth
+ if c.router.LoginPath(req.Path) {
+ resp, auth, err = c.handleLoginRequest(req)
+ } else {
+ resp, auth, err = c.handleRequest(req)
+ }
+
+ // Ensure we don't leak internal data
+ if resp != nil {
+ if resp.Secret != nil {
+ resp.Secret.InternalData = nil
+ }
+ if resp.Auth != nil {
+ resp.Auth.InternalData = nil
+ }
+ }
+
+	// We are wrapping if there is anything to wrap (not a nil response) and a
+	// wrap TTL was specified. Errors on a call should be returned to the
+	// caller, so wrapping is turned off if an error is hit and the error is
+	// logged to the audit log.
+ wrapping := resp != nil &&
+ err == nil &&
+ !resp.IsError() &&
+ resp.WrapInfo != nil &&
+ resp.WrapInfo.TTL != 0
+
+ if wrapping {
+ cubbyResp, cubbyErr := c.wrapInCubbyhole(req, resp)
+		// If not successful, this returns either an error response from the
+		// cubbyhole backend or an error; if either is set, set resp and err to
+		// those and continue so that they are what we audit log. Otherwise,
+		// finish the wrapping and audit log that.
+ if cubbyResp != nil || cubbyErr != nil {
+ resp = cubbyResp
+ err = cubbyErr
+ } else {
+ wrappingResp := &logical.Response{
+ WrapInfo: resp.WrapInfo,
+ }
+ wrappingResp.CloneWarnings(resp)
+ resp = wrappingResp
+ }
+ }
+
+ auditResp := resp
+	// When unwrapping we want to log the actual response that will be written
+	// out. We still want to return the raw value so that none of it is
+	// automatically modified.
+ if req.Path == "sys/wrapping/unwrap" &&
+ resp != nil &&
+ resp.Data != nil &&
+ resp.Data[logical.HTTPRawBody] != nil {
+
+ // Decode the JSON
+ httpResp := &logical.HTTPResponse{}
+ err := jsonutil.DecodeJSON(resp.Data[logical.HTTPRawBody].([]byte), httpResp)
+ if err != nil {
+ c.logger.Error("core: failed to unmarshal wrapped HTTP response for audit logging", "error", err)
+ return nil, ErrInternalError
+ }
+
+ auditResp = logical.HTTPResponseToLogicalResponse(httpResp)
+ }
+
+ // Create an audit trail of the response
+ if auditErr := c.auditBroker.LogResponse(auth, req, auditResp, c.auditedHeaders, err); auditErr != nil {
+ c.logger.Error("core: failed to audit response", "request_path", req.Path, "error", auditErr)
+ return nil, ErrInternalError
+ }
+
+ return
+}
+
+func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, retAuth *logical.Auth, retErr error) {
+ defer metrics.MeasureSince([]string{"core", "handle_request"}, time.Now())
+
+ // Validate the token
+ auth, te, ctErr := c.checkToken(req)
+ // We run this logic first because we want to decrement the use count even in the case of an error
+ if te != nil {
+ // Attempt to use the token (decrement NumUses)
+ var err error
+ te, err = c.tokenStore.UseToken(te)
+ if err != nil {
+ c.logger.Error("core: failed to use token", "error", err)
+ retErr = multierror.Append(retErr, ErrInternalError)
+ return nil, nil, retErr
+ }
+ if te == nil {
+ // Token has been revoked by this point
+ retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
+ return nil, nil, retErr
+ }
+ if te.NumUses == -1 {
+ // We defer a revocation until after logic has run, since this is a
+ // valid request (this is the token's final use). We pass the ID in
+ // directly just to be safe in case something else modifies te later.
+ defer func(id string) {
+ err = c.tokenStore.Revoke(id)
+ if err != nil {
+ c.logger.Error("core: failed to revoke token", "error", err)
+ retResp = nil
+ retAuth = nil
+ retErr = multierror.Append(retErr, ErrInternalError)
+ }
+ if retResp != nil && retResp.Secret != nil &&
+ // Some backends return a TTL even without a Lease ID
+ retResp.Secret.LeaseID != "" {
+ retResp = logical.ErrorResponse("Secret cannot be returned; token had one use left, so leased credentials were immediately revoked.")
+ return
+ }
+ }(te.ID)
+ }
+ }
+ if ctErr != nil {
+ // If it is an internal error we return that, otherwise we
+ // return invalid request so that the status codes can be correct
+ var errType error
+ switch ctErr {
+ case ErrInternalError, logical.ErrPermissionDenied:
+ errType = ctErr
+ default:
+ errType = logical.ErrInvalidRequest
+ }
+
+ if err := c.auditBroker.LogRequest(auth, req, c.auditedHeaders, ctErr); err != nil {
+ c.logger.Error("core: failed to audit request", "path", req.Path, "error", err)
+ }
+
+ if errType != nil {
+ retErr = multierror.Append(retErr, errType)
+ }
+ return logical.ErrorResponse(ctErr.Error()), nil, retErr
+ }
+
+ // Attach the display name
+ req.DisplayName = auth.DisplayName
+
+ // Create an audit trail of the request
+ if err := c.auditBroker.LogRequest(auth, req, c.auditedHeaders, nil); err != nil {
+ c.logger.Error("core: failed to audit request", "path", req.Path, "error", err)
+ retErr = multierror.Append(retErr, ErrInternalError)
+ return nil, auth, retErr
+ }
+
+ // Route the request
+ resp, routeErr := c.router.Route(req)
+ if resp != nil {
+		// If wrapping is requested, use the shorter of the request and response TTLs
+ var wrapTTL time.Duration
+ var wrapFormat string
+
+		// Ensure no wrap info is set other than, possibly, the TTL
+ if resp.WrapInfo != nil {
+ if resp.WrapInfo.TTL > 0 {
+ wrapTTL = resp.WrapInfo.TTL
+ }
+ wrapFormat = resp.WrapInfo.Format
+ resp.WrapInfo = nil
+ }
+
+ if req.WrapInfo != nil {
+ if req.WrapInfo.TTL > 0 {
+ switch {
+ case wrapTTL == 0:
+ wrapTTL = req.WrapInfo.TTL
+ case req.WrapInfo.TTL < wrapTTL:
+ wrapTTL = req.WrapInfo.TTL
+ }
+ }
+ // If the wrap format hasn't been set by the response, set it to
+ // the request format
+ if req.WrapInfo.Format != "" && wrapFormat == "" {
+ wrapFormat = req.WrapInfo.Format
+ }
+ }
+
+ if wrapTTL > 0 {
+ resp.WrapInfo = &logical.ResponseWrapInfo{
+ TTL: wrapTTL,
+ Format: wrapFormat,
+ }
+ }
+ }
+
+ // If there is a secret, we must register it with the expiration manager.
+ // We exclude renewal of a lease, since it does not need to be re-registered
+ if resp != nil && resp.Secret != nil && !strings.HasPrefix(req.Path, "sys/renew") {
+ // Get the SystemView for the mount
+ sysView := c.router.MatchingSystemView(req.Path)
+ if sysView == nil {
+ c.logger.Error("core: unable to retrieve system view from router")
+ retErr = multierror.Append(retErr, ErrInternalError)
+ return nil, auth, retErr
+ }
+
+ // Apply the default lease if none given
+ if resp.Secret.TTL == 0 {
+ resp.Secret.TTL = sysView.DefaultLeaseTTL()
+ }
+
+ // Limit the lease duration
+ maxTTL := sysView.MaxLeaseTTL()
+ if resp.Secret.TTL > maxTTL {
+ resp.Secret.TTL = maxTTL
+ }
+
+		// Generic mounts should return the TTL but not register a lease, since
+		// registering one for every read would cause a massive slowdown
+ registerLease := true
+ matchingBackend := c.router.MatchingBackend(req.Path)
+ if matchingBackend == nil {
+ c.logger.Error("core: unable to retrieve generic backend from router")
+ retErr = multierror.Append(retErr, ErrInternalError)
+ return nil, auth, retErr
+ }
+ if ptbe, ok := matchingBackend.(*PassthroughBackend); ok {
+ if !ptbe.GeneratesLeases() {
+ registerLease = false
+ resp.Secret.Renewable = false
+ }
+ }
+
+ if registerLease {
+ leaseID, err := c.expiration.Register(req, resp)
+ if err != nil {
+ c.logger.Error("core: failed to register lease", "request_path", req.Path, "error", err)
+ retErr = multierror.Append(retErr, ErrInternalError)
+ return nil, auth, retErr
+ }
+ resp.Secret.LeaseID = leaseID
+ }
+ }
+
+	// Only the token store is allowed to return an auth block; for any other
+	// request this is an internal error. We exclude renewal of a token, since
+	// it does not need to be re-registered
+ if resp != nil && resp.Auth != nil && !strings.HasPrefix(req.Path, "auth/token/renew") {
+ if !strings.HasPrefix(req.Path, "auth/token/") {
+ c.logger.Error("core: unexpected Auth response for non-token backend", "request_path", req.Path)
+ retErr = multierror.Append(retErr, ErrInternalError)
+ return nil, auth, retErr
+ }
+
+ // Register with the expiration manager. We use the token's actual path
+ // here because roles allow suffixes.
+ te, err := c.tokenStore.Lookup(resp.Auth.ClientToken)
+ if err != nil {
+ c.logger.Error("core: failed to look up token", "error", err)
+ retErr = multierror.Append(retErr, ErrInternalError)
+ return nil, nil, retErr
+ }
+
+ if err := c.expiration.RegisterAuth(te.Path, resp.Auth); err != nil {
+ c.logger.Error("core: failed to register token lease", "request_path", req.Path, "error", err)
+ retErr = multierror.Append(retErr, ErrInternalError)
+ return nil, auth, retErr
+ }
+ }
+
+ if resp != nil &&
+ req.Path == "cubbyhole/response" &&
+ len(te.Policies) == 1 &&
+ te.Policies[0] == responseWrappingPolicyName {
+ resp.AddWarning("Reading from 'cubbyhole/response' is deprecated. Please use sys/wrapping/unwrap to unwrap responses, as it provides additional security checks and other benefits.")
+ }
+
+ // Return the response and error
+ if routeErr != nil {
+ retErr = multierror.Append(retErr, routeErr)
+ }
+ return resp, auth, retErr
+}
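+
+// The TTL merging above is "shortest non-zero TTL wins". As a sketch, it
+// could be factored into a helper like this (hypothetical, not upstream):
+//
+//	func effectiveWrapTTL(reqTTL, respTTL time.Duration) time.Duration {
+//		switch {
+//		case respTTL == 0:
+//			return reqTTL
+//		case reqTTL != 0 && reqTTL < respTTL:
+//			return reqTTL
+//		default:
+//			return respTTL
+//		}
+//	}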
+
+// handleLoginRequest is used to handle a login request, which is an
+// unauthenticated request to the backend.
+func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *logical.Auth, error) {
+ defer metrics.MeasureSince([]string{"core", "handle_login_request"}, time.Now())
+
+	// Create an audit trail of the request; auth is not available on login requests
+ if err := c.auditBroker.LogRequest(nil, req, c.auditedHeaders, nil); err != nil {
+ c.logger.Error("core: failed to audit request", "path", req.Path, "error", err)
+ return nil, nil, ErrInternalError
+ }
+
+ // The token store uses authentication even when creating a new token,
+ // so it's handled in handleRequest. It should not be reached here.
+ if strings.HasPrefix(req.Path, "auth/token/") {
+ c.logger.Error("core: unexpected login request for token backend", "request_path", req.Path)
+ return nil, nil, ErrInternalError
+ }
+
+ // Route the request
+ resp, routeErr := c.router.Route(req)
+ if resp != nil {
+		// If wrapping is requested, use the shorter of the request and response TTLs
+ var wrapTTL time.Duration
+ var wrapFormat string
+
+		// Ensure no wrap info is set other than, possibly, the TTL
+ if resp.WrapInfo != nil {
+ if resp.WrapInfo.TTL > 0 {
+ wrapTTL = resp.WrapInfo.TTL
+ }
+ wrapFormat = resp.WrapInfo.Format
+ resp.WrapInfo = nil
+ }
+
+ if req.WrapInfo != nil {
+ if req.WrapInfo.TTL > 0 {
+ switch {
+ case wrapTTL == 0:
+ wrapTTL = req.WrapInfo.TTL
+ case req.WrapInfo.TTL < wrapTTL:
+ wrapTTL = req.WrapInfo.TTL
+ }
+ }
+ if req.WrapInfo.Format != "" && wrapFormat == "" {
+ wrapFormat = req.WrapInfo.Format
+ }
+ }
+
+ if wrapTTL > 0 {
+ resp.WrapInfo = &logical.ResponseWrapInfo{
+ TTL: wrapTTL,
+ Format: wrapFormat,
+ }
+ }
+ }
+
+ // A login request should never return a secret!
+ if resp != nil && resp.Secret != nil {
+ c.logger.Error("core: unexpected Secret response for login path", "request_path", req.Path)
+ return nil, nil, ErrInternalError
+ }
+
+ // If the response generated an authentication, then generate the token
+ var auth *logical.Auth
+ if resp != nil && resp.Auth != nil {
+ auth = resp.Auth
+
+ if strutil.StrListSubset(auth.Policies, []string{"root"}) {
+ return logical.ErrorResponse("authentication backends cannot create root tokens"), nil, logical.ErrInvalidRequest
+ }
+
+ // Determine the source of the login
+ source := c.router.MatchingMount(req.Path)
+ source = strings.TrimPrefix(source, credentialRoutePrefix)
+ source = strings.Replace(source, "/", "-", -1)
+
+ // Prepend the source to the display name
+ auth.DisplayName = strings.TrimSuffix(source+auth.DisplayName, "-")
+
+ sysView := c.router.MatchingSystemView(req.Path)
+ if sysView == nil {
+ c.logger.Error("core: unable to look up sys view for login path", "request_path", req.Path)
+ return nil, nil, ErrInternalError
+ }
+
+ // Set the default lease if not provided
+ if auth.TTL == 0 {
+ auth.TTL = sysView.DefaultLeaseTTL()
+ }
+
+ // Limit the lease duration
+ if auth.TTL > sysView.MaxLeaseTTL() {
+ auth.TTL = sysView.MaxLeaseTTL()
+ }
+
+ // Generate a token
+ te := TokenEntry{
+ Path: req.Path,
+ Policies: auth.Policies,
+ Meta: auth.Metadata,
+ DisplayName: auth.DisplayName,
+ CreationTime: time.Now().Unix(),
+ TTL: auth.TTL,
+ NumUses: auth.NumUses,
+ }
+
+ te.Policies = policyutil.SanitizePolicies(te.Policies, true)
+
+ // Prevent internal policies from being assigned to tokens
+ for _, policy := range te.Policies {
+ if strutil.StrListContains(nonAssignablePolicies, policy) {
+ return logical.ErrorResponse(fmt.Sprintf("cannot assign policy %q", policy)), nil, logical.ErrInvalidRequest
+ }
+ }
+
+ if err := c.tokenStore.create(&te); err != nil {
+ c.logger.Error("core: failed to create token", "error", err)
+ return nil, auth, ErrInternalError
+ }
+
+ // Populate the client token and accessor
+ auth.ClientToken = te.ID
+ auth.Accessor = te.Accessor
+ auth.Policies = te.Policies
+
+ // Register with the expiration manager
+ if err := c.expiration.RegisterAuth(te.Path, auth); err != nil {
+ c.logger.Error("core: failed to register token lease", "request_path", req.Path, "error", err)
+ return nil, auth, ErrInternalError
+ }
+
+ // Attach the display name, might be used by audit backends
+ req.DisplayName = auth.DisplayName
+ }
+
+ return resp, auth, routeErr
+}
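+
+// Worked example of the display-name derivation above (mount and user are
+// hypothetical): for a userpass backend mounted at "auth/userpass/",
+//
+//	source := c.router.MatchingMount("auth/userpass/login/bob") // "auth/userpass/"
+//	source = strings.TrimPrefix(source, credentialRoutePrefix)  // "userpass/"
+//	source = strings.Replace(source, "/", "-", -1)              // "userpass-"
+//
+// so a backend-supplied DisplayName of "bob" becomes "userpass-bob", and an
+// empty DisplayName collapses to just "userpass" via the TrimSuffix.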
diff --git a/vendor/github.com/hashicorp/vault/vault/request_handling_test.go b/vendor/github.com/hashicorp/vault/vault/request_handling_test.go
new file mode 100644
index 0000000..c966b04
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/request_handling_test.go
@@ -0,0 +1,142 @@
+package vault
+
+import (
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-uuid"
+ credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestRequestHandling_Wrapping(t *testing.T) {
+ core, _, root := TestCoreUnsealed(t)
+
+ core.logicalBackends["generic"] = PassthroughBackendFactory
+
+ meUUID, _ := uuid.GenerateUUID()
+ err := core.mount(&MountEntry{
+ Table: mountTableType,
+ UUID: meUUID,
+ Path: "wraptest",
+ Type: "generic",
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // No duration specified
+ req := &logical.Request{
+ Path: "wraptest/foo",
+ ClientToken: root,
+ Operation: logical.UpdateOperation,
+ Data: map[string]interface{}{
+ "zip": "zap",
+ },
+ }
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req = &logical.Request{
+ Path: "wraptest/foo",
+ ClientToken: root,
+ Operation: logical.ReadOperation,
+ WrapInfo: &logical.RequestWrapInfo{
+		TTL: 15 * time.Second,
+ },
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %v", resp)
+ }
+	if resp.WrapInfo == nil || resp.WrapInfo.TTL != 15*time.Second {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestRequestHandling_LoginWrapping(t *testing.T) {
+ core, _, root := TestCoreUnsealed(t)
+
+ if err := core.loadMounts(); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ core.credentialBackends["userpass"] = credUserpass.Factory
+
+ // No duration specified
+ req := &logical.Request{
+ Path: "sys/auth/userpass",
+ ClientToken: root,
+ Operation: logical.UpdateOperation,
+ Data: map[string]interface{}{
+ "type": "userpass",
+ },
+ }
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req.Path = "auth/userpass/users/test"
+ req.Data = map[string]interface{}{
+ "password": "foo",
+ "policies": "default",
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req = &logical.Request{
+ Path: "auth/userpass/login/test",
+ Operation: logical.UpdateOperation,
+ Data: map[string]interface{}{
+ "password": "foo",
+ },
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %v", resp)
+ }
+ if resp.WrapInfo != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req = &logical.Request{
+ Path: "auth/userpass/login/test",
+ Operation: logical.UpdateOperation,
+ WrapInfo: &logical.RequestWrapInfo{
+			TTL: 15 * time.Second,
+ },
+ Data: map[string]interface{}{
+ "password": "foo",
+ },
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %v", resp)
+ }
+	if resp.WrapInfo == nil || resp.WrapInfo.TTL != 15*time.Second {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/rollback.go b/vendor/github.com/hashicorp/vault/vault/rollback.go
new file mode 100644
index 0000000..9ace6b3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/rollback.go
@@ -0,0 +1,230 @@
+package vault
+
+import (
+ "strings"
+ "sync"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/armon/go-metrics"
+ "github.com/hashicorp/vault/logical"
+)
+
+const (
+ // rollbackPeriod is how often we attempt rollbacks for all the backends
+ rollbackPeriod = time.Minute
+)
+
+// RollbackManager is responsible for performing rollbacks of partial
+// secrets within logical backends.
+//
+// During normal operations, it is possible for logical backends to fail
+// partway through an operation, leaving behind "partial secrets": state that
+// is never sent back to a user but still needs to be cleaned up.
+// This manager handles that by periodically (on a timer) requesting that the
+// backends clean up.
+//
+// The RollbackManager periodically initiates a logical.RollbackOperation
+// on every mounted logical backend. It ensures that, per path, only one
+// rollback operation is in-flight at any given time within a single
+// seal/unseal phase.
+type RollbackManager struct {
+ logger log.Logger
+
+	// This returns a snapshot of the current mount table of both logical and
+	// credential backends. The function handles its own locking, so callers
+	// hold no lock over the returned entries.
+ backends func() []*MountEntry
+
+ router *Router
+ period time.Duration
+
+ inflightAll sync.WaitGroup
+ inflight map[string]*rollbackState
+ inflightLock sync.RWMutex
+
+ doneCh chan struct{}
+ shutdown bool
+ shutdownCh chan struct{}
+ shutdownLock sync.Mutex
+}
+
+// rollbackState is used to track the state of a single rollback attempt
+type rollbackState struct {
+ lastError error
+ sync.WaitGroup
+}
+
+// NewRollbackManager is used to create a new rollback manager
+func NewRollbackManager(logger log.Logger, backendsFunc func() []*MountEntry, router *Router) *RollbackManager {
+ r := &RollbackManager{
+ logger: logger,
+ backends: backendsFunc,
+ router: router,
+ period: rollbackPeriod,
+ inflight: make(map[string]*rollbackState),
+ doneCh: make(chan struct{}),
+ shutdownCh: make(chan struct{}),
+ }
+ return r
+}
+
+// Start starts the rollback manager
+func (m *RollbackManager) Start() {
+ go m.run()
+}
+
+// Stop stops the running manager. This will wait for any in-flight
+// rollbacks to complete.
+func (m *RollbackManager) Stop() {
+ m.shutdownLock.Lock()
+ defer m.shutdownLock.Unlock()
+ if !m.shutdown {
+ m.shutdown = true
+ close(m.shutdownCh)
+ <-m.doneCh
+ }
+ m.inflightAll.Wait()
+}
+
+// run is a long running routine to periodically invoke rollback
+func (m *RollbackManager) run() {
+ m.logger.Info("rollback: starting rollback manager")
+ tick := time.NewTicker(m.period)
+ defer tick.Stop()
+ defer close(m.doneCh)
+ for {
+ select {
+ case <-tick.C:
+ m.triggerRollbacks()
+
+ case <-m.shutdownCh:
+ m.logger.Info("rollback: stopping rollback manager")
+ return
+ }
+ }
+}
+
+// triggerRollbacks is used to trigger the rollbacks across all the backends
+func (m *RollbackManager) triggerRollbacks() {
+
+ backends := m.backends()
+
+ for _, e := range backends {
+ path := e.Path
+ if e.Table == credentialTableType {
+ path = "auth/" + path
+ }
+ m.inflightLock.RLock()
+ _, ok := m.inflight[path]
+ m.inflightLock.RUnlock()
+ if !ok {
+ m.startRollback(path)
+ }
+ }
+}
+
+// startRollback is used to start an async rollback attempt. It acquires
+// inflightLock itself, so callers must not hold that lock.
+func (m *RollbackManager) startRollback(path string) *rollbackState {
+ rs := &rollbackState{}
+ rs.Add(1)
+ m.inflightAll.Add(1)
+ m.inflightLock.Lock()
+ m.inflight[path] = rs
+ m.inflightLock.Unlock()
+ go m.attemptRollback(path, rs)
+ return rs
+}
+
+// attemptRollback invokes a RollbackOperation for the given path
+func (m *RollbackManager) attemptRollback(path string, rs *rollbackState) (err error) {
+ defer metrics.MeasureSince([]string{"rollback", "attempt", strings.Replace(path, "/", "-", -1)}, time.Now())
+ if m.logger.IsTrace() {
+ m.logger.Trace("rollback: attempting rollback", "path", path)
+ }
+
+ defer func() {
+ rs.lastError = err
+ rs.Done()
+ m.inflightAll.Done()
+ m.inflightLock.Lock()
+ delete(m.inflight, path)
+ m.inflightLock.Unlock()
+ }()
+
+ // Invoke a RollbackOperation
+ req := &logical.Request{
+ Operation: logical.RollbackOperation,
+ Path: path,
+ }
+ _, err = m.router.Route(req)
+
+ // If the error is an unsupported operation, then it doesn't
+ // matter, the backend doesn't support it.
+ if err == logical.ErrUnsupportedOperation {
+ err = nil
+ }
+ if err != nil {
+ m.logger.Error("rollback: error rolling back", "path", path, "error", err)
+ }
+ return
+}
+
+// Rollback is used to trigger an immediate rollback of the path,
+// or to join an existing rollback operation if in flight.
+func (m *RollbackManager) Rollback(path string) error {
+ // Check for an existing attempt and start one if none
+ m.inflightLock.RLock()
+ rs, ok := m.inflight[path]
+ m.inflightLock.RUnlock()
+ if !ok {
+ rs = m.startRollback(path)
+ }
+
+ // Wait for the attempt to finish
+ rs.Wait()
+
+ // Return the last error
+ return rs.lastError
+}
+
+// The methods below are the hooks from core that are called pre/post seal.
+
+// startRollback is used to start the rollback manager after unsealing
+func (c *Core) startRollback() error {
+ backendsFunc := func() []*MountEntry {
+ ret := []*MountEntry{}
+ c.mountsLock.RLock()
+ defer c.mountsLock.RUnlock()
+ // During teardown/setup after a leader change or unseal there could be
+ // something racy here so make sure the table isn't nil
+ if c.mounts != nil {
+ for _, entry := range c.mounts.Entries {
+ ret = append(ret, entry)
+ }
+ }
+ c.authLock.RLock()
+ defer c.authLock.RUnlock()
+ // During teardown/setup after a leader change or unseal there could be
+ // something racy here so make sure the table isn't nil
+ if c.auth != nil {
+ for _, entry := range c.auth.Entries {
+ ret = append(ret, entry)
+ }
+ }
+ return ret
+ }
+ c.rollback = NewRollbackManager(c.logger, backendsFunc, c.router)
+ c.rollback.Start()
+ return nil
+}
+
+// stopRollback is used to stop running the rollback manager before sealing
+func (c *Core) stopRollback() error {
+ if c.rollback != nil {
+ c.rollback.Stop()
+ c.rollback = nil
+ }
+ return nil
+}
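+
+// Usage sketch (a hypothetical call site): because Rollback joins any attempt
+// already in flight, callers can force a synchronous flush of a backend, for
+// example before unmounting it:
+//
+//	if err := c.rollback.Rollback("secret/"); err != nil {
+//		c.logger.Error("rollback: pre-unmount rollback failed", "error", err)
+//	}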
diff --git a/vendor/github.com/hashicorp/vault/vault/rollback_test.go b/vendor/github.com/hashicorp/vault/vault/rollback_test.go
new file mode 100644
index 0000000..797993a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/rollback_test.go
@@ -0,0 +1,108 @@
+package vault
+
+import (
+ "sync"
+ "testing"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/logformat"
+)
+
+// mockRollback returns a mock rollback manager
+func mockRollback(t *testing.T) (*RollbackManager, *NoopBackend) {
+ backend := new(NoopBackend)
+ mounts := new(MountTable)
+ router := NewRouter()
+
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+
+ mounts.Entries = []*MountEntry{
+ &MountEntry{
+ Path: "foo",
+ },
+ }
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := router.Mount(backend, "foo", &MountEntry{UUID: meUUID}, view); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ mountsFunc := func() []*MountEntry {
+ return mounts.Entries
+ }
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ rb := NewRollbackManager(logger, mountsFunc, router)
+ rb.period = 10 * time.Millisecond
+ return rb, backend
+}
+
+func TestRollbackManager(t *testing.T) {
+ m, backend := mockRollback(t)
+ if len(backend.Paths) > 0 {
+ t.Fatalf("bad: %#v", backend)
+ }
+
+ m.Start()
+ time.Sleep(50 * time.Millisecond)
+ m.Stop()
+
+ count := len(backend.Paths)
+ if count == 0 {
+ t.Fatalf("bad: %#v", backend)
+ }
+ if backend.Paths[0] != "" {
+ t.Fatalf("bad: %#v", backend)
+ }
+
+ time.Sleep(50 * time.Millisecond)
+
+ if count != len(backend.Paths) {
+ t.Fatalf("should stop requests: %#v", backend)
+ }
+}
+
+func TestRollbackManager_Join(t *testing.T) {
+ m, backend := mockRollback(t)
+ if len(backend.Paths) > 0 {
+ t.Fatalf("bad: %#v", backend)
+ }
+
+ m.Start()
+ defer m.Stop()
+
+ wg := &sync.WaitGroup{}
+ wg.Add(3)
+
+ go func() {
+ defer wg.Done()
+ err := m.Rollback("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+ err := m.Rollback("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+ err := m.Rollback("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }()
+ wg.Wait()
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/router.go b/vendor/github.com/hashicorp/vault/vault/router.go
new file mode 100644
index 0000000..5a90dfa
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/router.go
@@ -0,0 +1,404 @@
+package vault
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/armon/go-metrics"
+ "github.com/armon/go-radix"
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+)
+
+// Router is used to do prefix based routing of a request to a logical backend
+type Router struct {
+ l sync.RWMutex
+ root *radix.Tree
+ tokenStoreSalt *salt.Salt
+
+	// storagePrefix maps the prefix used for storage (i.e. the BarrierView
+	// prefix) to the backend. This is used to map a key back to the backend
+	// that owns it. For example: logical/uuid1/foobar -> secrets/ (generic backend) + foobar
+ storagePrefix *radix.Tree
+}
+
+// NewRouter returns a new router
+func NewRouter() *Router {
+ r := &Router{
+ root: radix.New(),
+ storagePrefix: radix.New(),
+ }
+ return r
+}
+
+// routeEntry is used to represent a mount point in the router
+type routeEntry struct {
+ tainted bool
+ backend logical.Backend
+ mountEntry *MountEntry
+ storageView *BarrierView
+ rootPaths *radix.Tree
+ loginPaths *radix.Tree
+}
+
+// SaltID is used to apply a salt and hash to an ID to make sure it's not reversible
+func (re *routeEntry) SaltID(id string) string {
+ return salt.SaltID(re.mountEntry.UUID, id, salt.SHA1Hash)
+}
+
+// Mount is used to expose a logical backend at a given prefix, using a unique salt,
+// and the barrier view for that path.
+func (r *Router) Mount(backend logical.Backend, prefix string, mountEntry *MountEntry, storageView *BarrierView) error {
+ r.l.Lock()
+ defer r.l.Unlock()
+
+ // Check if this is a nested mount
+ if existing, _, ok := r.root.LongestPrefix(prefix); ok && existing != "" {
+ return fmt.Errorf("cannot mount under existing mount '%s'", existing)
+ }
+
+ // Build the paths
+ paths := backend.SpecialPaths()
+ if paths == nil {
+ paths = new(logical.Paths)
+ }
+
+ // Create a mount entry
+ re := &routeEntry{
+ tainted: false,
+ backend: backend,
+ mountEntry: mountEntry,
+ storageView: storageView,
+ rootPaths: pathsToRadix(paths.Root),
+ loginPaths: pathsToRadix(paths.Unauthenticated),
+ }
+ r.root.Insert(prefix, re)
+ r.storagePrefix.Insert(storageView.prefix, re)
+
+ return nil
+}
+
+// Unmount is used to remove a logical backend from a given prefix
+func (r *Router) Unmount(prefix string) error {
+ r.l.Lock()
+ defer r.l.Unlock()
+
+ // Fast-path out if the backend doesn't exist
+ raw, ok := r.root.Get(prefix)
+ if !ok {
+ return nil
+ }
+
+ // Call backend's Cleanup routine
+ re := raw.(*routeEntry)
+ re.backend.Cleanup()
+
+ // Purge from the radix trees
+ r.root.Delete(prefix)
+ r.storagePrefix.Delete(re.storageView.prefix)
+ return nil
+}
+
+// Remount is used to change the mount location of a logical backend
+func (r *Router) Remount(src, dst string) error {
+ r.l.Lock()
+ defer r.l.Unlock()
+
+ // Check for existing mount
+ raw, ok := r.root.Get(src)
+ if !ok {
+ return fmt.Errorf("no mount at '%s'", src)
+ }
+
+ // Update the mount point
+ r.root.Delete(src)
+ r.root.Insert(dst, raw)
+ return nil
+}
+
+// Taint is used to mark a path as tainted. This means only RollbackOperation
+// and RevokeOperation requests are allowed to proceed
+func (r *Router) Taint(path string) error {
+ r.l.Lock()
+ defer r.l.Unlock()
+ _, raw, ok := r.root.LongestPrefix(path)
+ if ok {
+ raw.(*routeEntry).tainted = true
+ }
+ return nil
+}
+
+// Untaint is used to unmark a path as tainted.
+func (r *Router) Untaint(path string) error {
+ r.l.Lock()
+ defer r.l.Unlock()
+ _, raw, ok := r.root.LongestPrefix(path)
+ if ok {
+ raw.(*routeEntry).tainted = false
+ }
+ return nil
+}
+
+// MatchingMount returns the mount prefix that would be used for a path
+func (r *Router) MatchingMount(path string) string {
+ r.l.RLock()
+ mount, _, ok := r.root.LongestPrefix(path)
+ r.l.RUnlock()
+ if !ok {
+ return ""
+ }
+ return mount
+}
+
+// MatchingStorageView returns the storage view used for a path
+func (r *Router) MatchingStorageView(path string) *BarrierView {
+ r.l.RLock()
+ _, raw, ok := r.root.LongestPrefix(path)
+ r.l.RUnlock()
+ if !ok {
+ return nil
+ }
+ return raw.(*routeEntry).storageView
+}
+
+// MatchingMountEntry returns the MountEntry used for a path
+func (r *Router) MatchingMountEntry(path string) *MountEntry {
+ r.l.RLock()
+ _, raw, ok := r.root.LongestPrefix(path)
+ r.l.RUnlock()
+ if !ok {
+ return nil
+ }
+ return raw.(*routeEntry).mountEntry
+}
+
+// MatchingBackend returns the logical.Backend used for a path
+func (r *Router) MatchingBackend(path string) logical.Backend {
+ r.l.RLock()
+ _, raw, ok := r.root.LongestPrefix(path)
+ r.l.RUnlock()
+ if !ok {
+ return nil
+ }
+ return raw.(*routeEntry).backend
+}
+
+// MatchingSystemView returns the SystemView used for a path
+func (r *Router) MatchingSystemView(path string) logical.SystemView {
+ r.l.RLock()
+ _, raw, ok := r.root.LongestPrefix(path)
+ r.l.RUnlock()
+ if !ok {
+ return nil
+ }
+ return raw.(*routeEntry).backend.System()
+}
+
+// MatchingStoragePrefix returns the mount path and storage prefix matching
+// the given path
+func (r *Router) MatchingStoragePrefix(path string) (string, string, bool) {
+ r.l.RLock()
+ _, raw, ok := r.storagePrefix.LongestPrefix(path)
+ r.l.RUnlock()
+ if !ok {
+ return "", "", false
+ }
+
+ // Extract the mount path and storage prefix
+ re := raw.(*routeEntry)
+ mountPath := re.mountEntry.Path
+ prefix := re.storageView.prefix
+ return mountPath, prefix, true
+}
+
+// Route is used to route a given request
+func (r *Router) Route(req *logical.Request) (*logical.Response, error) {
+ resp, _, _, err := r.routeCommon(req, false)
+ return resp, err
+}
+
+// RouteExistenceCheck is used to route a given existence check request
+func (r *Router) RouteExistenceCheck(req *logical.Request) (bool, bool, error) {
+ _, ok, exists, err := r.routeCommon(req, true)
+ return ok, exists, err
+}
+
+func (r *Router) routeCommon(req *logical.Request, existenceCheck bool) (*logical.Response, bool, bool, error) {
+ // Find the mount point
+ r.l.RLock()
+ mount, raw, ok := r.root.LongestPrefix(req.Path)
+ if !ok {
+ // Re-check for a backend by appending a slash. This lets "foo" mean
+ // "foo/" at the root level which is almost always what we want.
+ req.Path += "/"
+ mount, raw, ok = r.root.LongestPrefix(req.Path)
+ }
+ r.l.RUnlock()
+ if !ok {
+ return logical.ErrorResponse(fmt.Sprintf("no handler for route '%s'", req.Path)), false, false, logical.ErrUnsupportedPath
+ }
+ defer metrics.MeasureSince([]string{"route", string(req.Operation),
+ strings.Replace(mount, "/", "-", -1)}, time.Now())
+ re := raw.(*routeEntry)
+
+ // If the path is tainted, we reject any operation except for
+ // Rollback and Revoke
+ if re.tainted {
+ switch req.Operation {
+ case logical.RevokeOperation, logical.RollbackOperation:
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("no handler for route '%s'", req.Path)), false, false, logical.ErrUnsupportedPath
+ }
+ }
+
+ // Adjust the path to exclude the routing prefix
+ originalPath := req.Path
+ req.Path = strings.TrimPrefix(req.Path, mount)
+ req.MountPoint = mount
+ req.MountType = re.mountEntry.Type
+ if req.Path == "/" {
+ req.Path = ""
+ }
+
+ // Attach the storage view for the request
+ req.Storage = re.storageView
+
+ // Hash the request token unless this is the token backend
+ clientToken := req.ClientToken
+ switch {
+ case strings.HasPrefix(originalPath, "auth/token/"):
+ case strings.HasPrefix(originalPath, "sys/"):
+ case strings.HasPrefix(originalPath, "cubbyhole/"):
+ // In order for the token store to revoke later, we need to have the same
+ // salted ID, so we double-salt what's going to the cubbyhole backend
+ req.ClientToken = re.SaltID(r.tokenStoreSalt.SaltID(req.ClientToken))
+ default:
+ req.ClientToken = re.SaltID(req.ClientToken)
+ }
+
+ // Cache the pointer to the original connection object
+ originalConn := req.Connection
+
+ // Cache the identifier of the request
+ originalReqID := req.ID
+
+ // Cache the client token's number of uses in the request
+ originalClientTokenRemainingUses := req.ClientTokenRemainingUses
+ req.ClientTokenRemainingUses = 0
+
+ // Cache the headers and hide them from backends
+ headers := req.Headers
+ req.Headers = nil
+
+ // Cache the wrap info of the request
+ var wrapInfo *logical.RequestWrapInfo
+ if req.WrapInfo != nil {
+ wrapInfo = &logical.RequestWrapInfo{
+ TTL: req.WrapInfo.TTL,
+ Format: req.WrapInfo.Format,
+ }
+ }
+
+ // Reset the request before returning
+ defer func() {
+ req.Path = originalPath
+ req.MountPoint = mount
+ req.MountType = re.mountEntry.Type
+ req.Connection = originalConn
+ req.ID = originalReqID
+ req.Storage = nil
+ req.ClientToken = clientToken
+ req.ClientTokenRemainingUses = originalClientTokenRemainingUses
+ req.WrapInfo = wrapInfo
+ req.Headers = headers
+ // This is only set in one place, after routing, so should never be set
+ // by a backend
+ req.SetLastRemoteWAL(0)
+ }()
+
+ // Invoke the backend
+ if existenceCheck {
+ ok, exists, err := re.backend.HandleExistenceCheck(req)
+ return nil, ok, exists, err
+ } else {
+ resp, err := re.backend.HandleRequest(req)
+ return resp, false, false, err
+ }
+}
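+
+// Worked example of the routing above (mount name is hypothetical): for a
+// backend mounted at "prod/aws/", a request for "prod/aws/creds/deploy"
+// resolves as
+//
+//	mount = "prod/aws/"          // longest prefix in the radix tree
+//	req.Path = "creds/deploy"    // mount prefix trimmed before the backend sees it
+//	req.MountPoint = "prod/aws/"
+//	req.Storage = re.storageView // BarrierView scoped to this mount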
+
+// RootPath checks if the given path requires root privileges
+func (r *Router) RootPath(path string) bool {
+ r.l.RLock()
+ mount, raw, ok := r.root.LongestPrefix(path)
+ r.l.RUnlock()
+ if !ok {
+ return false
+ }
+ re := raw.(*routeEntry)
+
+ // Trim to get remaining path
+ remain := strings.TrimPrefix(path, mount)
+
+ // Check the rootPaths of this backend
+ match, raw, ok := re.rootPaths.LongestPrefix(remain)
+ if !ok {
+ return false
+ }
+ prefixMatch := raw.(bool)
+
+ // Handle the prefix match case
+ if prefixMatch {
+ return strings.HasPrefix(remain, match)
+ }
+
+ // Handle the exact match case
+ return match == remain
+}
+
+// LoginPath checks if the given path is used for logins
+func (r *Router) LoginPath(path string) bool {
+ r.l.RLock()
+ mount, raw, ok := r.root.LongestPrefix(path)
+ r.l.RUnlock()
+ if !ok {
+ return false
+ }
+ re := raw.(*routeEntry)
+
+ // Trim to get remaining path
+ remain := strings.TrimPrefix(path, mount)
+
+ // Check the loginPaths of this backend
+ match, raw, ok := re.loginPaths.LongestPrefix(remain)
+ if !ok {
+ return false
+ }
+ prefixMatch := raw.(bool)
+
+ // Handle the prefix match case
+ if prefixMatch {
+ return strings.HasPrefix(remain, match)
+ }
+
+ // Handle the exact match case
+ return match == remain
+}
+
+// pathsToRadix converts a list of special paths into a radix tree, marking
+// each entry as either a prefix match or an exact match.
+func pathsToRadix(paths []string) *radix.Tree {
+ tree := radix.New()
+ for _, path := range paths {
+ // Check if this is a prefix or exact match
+ prefixMatch := len(path) >= 1 && path[len(path)-1] == '*'
+ if prefixMatch {
+ path = path[:len(path)-1]
+ }
+
+ tree.Insert(path, prefixMatch)
+ }
+
+ return tree
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/router_test.go b/vendor/github.com/hashicorp/vault/vault/router_test.go
new file mode 100644
index 0000000..e5de72e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/router_test.go
@@ -0,0 +1,408 @@
+package vault
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/logical"
+)
+
+type NoopBackend struct {
+ sync.Mutex
+
+ Root []string
+ Login []string
+ Paths []string
+ Requests []*logical.Request
+ Response *logical.Response
+ Invalidations []string
+}
+
+func (n *NoopBackend) HandleRequest(req *logical.Request) (*logical.Response, error) {
+ n.Lock()
+ defer n.Unlock()
+
+ requestCopy := *req
+ n.Paths = append(n.Paths, req.Path)
+ n.Requests = append(n.Requests, &requestCopy)
+ if req.Storage == nil {
+ return nil, fmt.Errorf("missing view")
+ }
+
+ return n.Response, nil
+}
+
+func (n *NoopBackend) HandleExistenceCheck(req *logical.Request) (bool, bool, error) {
+ return false, false, nil
+}
+
+func (n *NoopBackend) SpecialPaths() *logical.Paths {
+ return &logical.Paths{
+ Root: n.Root,
+ Unauthenticated: n.Login,
+ }
+}
+
+func (n *NoopBackend) System() logical.SystemView {
+ return logical.StaticSystemView{
+ DefaultLeaseTTLVal: time.Hour * 24,
+ MaxLeaseTTLVal: time.Hour * 24 * 32,
+ }
+}
+
+func (n *NoopBackend) Cleanup() {
+ // noop
+}
+
+func (n *NoopBackend) InvalidateKey(k string) {
+ n.Invalidations = append(n.Invalidations, k)
+}
+
+func (n *NoopBackend) Initialize() error {
+ return nil
+}
+
+func TestRouter_Mount(t *testing.T) {
+ r := NewRouter()
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n := &NoopBackend{}
+ err = r.Mount(n, "prod/aws/", &MountEntry{Path: "prod/aws/", UUID: meUUID}, view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ meUUID, err = uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ if !strings.Contains(err.Error(), "cannot mount under existing mount") {
+ t.Fatalf("err: %v", err)
+ }
+
+ if path := r.MatchingMount("prod/aws/foo"); path != "prod/aws/" {
+ t.Fatalf("bad: %s", path)
+ }
+
+ if v := r.MatchingStorageView("prod/aws/foo"); v != view {
+ t.Fatalf("bad: %v", v)
+ }
+
+ if path := r.MatchingMount("stage/aws/foo"); path != "" {
+ t.Fatalf("bad: %s", path)
+ }
+
+ if v := r.MatchingStorageView("stage/aws/foo"); v != nil {
+ t.Fatalf("bad: %v", v)
+ }
+
+ mount, prefix, ok := r.MatchingStoragePrefix("logical/foo")
+ if !ok {
+ t.Fatalf("missing storage prefix")
+ }
+ if mount != "prod/aws/" || prefix != "logical/" {
+ t.Fatalf("Bad: %v - %v", mount, prefix)
+ }
+
+ req := &logical.Request{
+ Path: "prod/aws/foo",
+ }
+ resp, err := r.Route(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ // Verify the path
+ if len(n.Paths) != 1 || n.Paths[0] != "foo" {
+ t.Fatalf("bad: %v", n.Paths)
+ }
+}
+
+func TestRouter_Unmount(t *testing.T) {
+ r := NewRouter()
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n := &NoopBackend{}
+ err = r.Mount(n, "prod/aws/", &MountEntry{Path: "prod/aws/", UUID: meUUID}, view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ err = r.Unmount("prod/aws/")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req := &logical.Request{
+ Path: "prod/aws/foo",
+ }
+ _, err = r.Route(req)
+ if !strings.Contains(err.Error(), "unsupported path") {
+ t.Fatalf("err: %v", err)
+ }
+
+ if _, _, ok := r.MatchingStoragePrefix("logical/foo"); ok {
+ t.Fatalf("should not have matching storage prefix")
+ }
+}
+
+func TestRouter_Remount(t *testing.T) {
+ r := NewRouter()
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n := &NoopBackend{}
+ me := &MountEntry{Path: "prod/aws/", UUID: meUUID}
+ err = r.Mount(n, "prod/aws/", me, view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ me.Path = "stage/aws/"
+ err = r.Remount("prod/aws/", "stage/aws/")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ err = r.Remount("prod/aws/", "stage/aws/")
+ if !strings.Contains(err.Error(), "no mount at") {
+ t.Fatalf("err: %v", err)
+ }
+
+ req := &logical.Request{
+ Path: "prod/aws/foo",
+ }
+ _, err = r.Route(req)
+ if !strings.Contains(err.Error(), "unsupported path") {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = &logical.Request{
+ Path: "stage/aws/foo",
+ }
+ _, err = r.Route(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Verify the path
+ if len(n.Paths) != 1 || n.Paths[0] != "foo" {
+ t.Fatalf("bad: %v", n.Paths)
+ }
+
+ // Check the resolve from storage still works
+ mount, prefix, _ := r.MatchingStoragePrefix("logical/foobar")
+ if mount != "stage/aws/" {
+ t.Fatalf("bad mount: %s", mount)
+ }
+ if prefix != "logical/" {
+ t.Fatalf("Bad prefix: %s", prefix)
+ }
+}
+
+func TestRouter_RootPath(t *testing.T) {
+ r := NewRouter()
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n := &NoopBackend{
+ Root: []string{
+ "root",
+ "policy/*",
+ },
+ }
+ err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ type tcase struct {
+ path string
+ expect bool
+ }
+ tcases := []tcase{
+ {"random", false},
+ {"prod/aws/foo", false},
+ {"prod/aws/root", true},
+ {"prod/aws/root-more", false},
+ {"prod/aws/policy", false},
+ {"prod/aws/policy/", true},
+ {"prod/aws/policy/ops", true},
+ }
+
+ for _, tc := range tcases {
+ out := r.RootPath(tc.path)
+ if out != tc.expect {
+ t.Fatalf("bad: path: %s expect: %v got %v", tc.path, tc.expect, out)
+ }
+ }
+}
+
+func TestRouter_LoginPath(t *testing.T) {
+ r := NewRouter()
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "auth/")
+
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n := &NoopBackend{
+ Login: []string{
+ "login",
+ "oauth/*",
+ },
+ }
+ err = r.Mount(n, "auth/foo/", &MountEntry{UUID: meUUID}, view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ type tcase struct {
+ path string
+ expect bool
+ }
+ tcases := []tcase{
+ {"random", false},
+ {"auth/foo/bar", false},
+ {"auth/foo/login", true},
+ {"auth/foo/oauth", false},
+ {"auth/foo/oauth/redirect", true},
+ }
+
+ for _, tc := range tcases {
+ out := r.LoginPath(tc.path)
+ if out != tc.expect {
+ t.Fatalf("bad: path: %s expect: %v got %v", tc.path, tc.expect, out)
+ }
+ }
+}
+
+func TestRouter_Taint(t *testing.T) {
+ r := NewRouter()
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n := &NoopBackend{}
+ err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ err = r.Taint("prod/aws/")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ }
+ _, err = r.Route(req)
+ if err.Error() != "unsupported path" {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Rollback and Revoke should work
+ req.Operation = logical.RollbackOperation
+ _, err = r.Route(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req.Operation = logical.RevokeOperation
+ _, err = r.Route(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestRouter_Untaint(t *testing.T) {
+ r := NewRouter()
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n := &NoopBackend{}
+ err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ err = r.Taint("prod/aws/")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ err = r.Untaint("prod/aws/")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ }
+ _, err = r.Route(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestPathsToRadix(t *testing.T) {
+ // Provide real paths
+ paths := []string{
+ "foo",
+ "foo/*",
+ "sub/bar*",
+ }
+ r := pathsToRadix(paths)
+
+ raw, ok := r.Get("foo")
+ if !ok || raw.(bool) {
+ t.Fatalf("bad: %v (foo)", raw)
+ }
+
+ raw, ok = r.Get("foo/")
+ if !ok || !raw.(bool) {
+ t.Fatalf("bad: %v (foo/)", raw)
+ }
+
+ raw, ok = r.Get("sub/bar")
+ if !ok || !raw.(bool) {
+ t.Fatalf("bad: %v (sub/bar)", raw)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/seal.go b/vendor/github.com/hashicorp/vault/vault/seal.go
new file mode 100644
index 0000000..305bb4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/seal.go
@@ -0,0 +1,325 @@
+package vault
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/physical"
+
+ "github.com/keybase/go-crypto/openpgp"
+ "github.com/keybase/go-crypto/openpgp/packet"
+)
+
+const (
+ // barrierSealConfigPath is the path used to store our seal configuration.
+ // This value is stored in plaintext, since we must be able to read it even
+ // with the Vault sealed. This is required so that we know how many secret
+ // parts must be used to reconstruct the master key.
+ barrierSealConfigPath = "core/seal-config"
+
+ // recoverySealConfigPath is the path to the recovery key seal
+ // configuration. It is inside the barrier.
+ recoverySealConfigPath = "core/recovery-seal-config"
+
+ // recoveryKeyPath is the path to the recovery key
+ recoveryKeyPath = "core/recovery-key"
+)
+
+type KeyNotFoundError struct {
+ Err error
+}
+
+func (e *KeyNotFoundError) WrappedErrors() []error {
+ return []error{e.Err}
+}
+
+func (e *KeyNotFoundError) Error() string {
+ return e.Err.Error()
+}
+
+type Seal interface {
+ SetCore(*Core)
+ Init() error
+ Finalize() error
+
+ StoredKeysSupported() bool
+ SetStoredKeys([][]byte) error
+ GetStoredKeys() ([][]byte, error)
+
+ BarrierType() string
+ BarrierConfig() (*SealConfig, error)
+ SetBarrierConfig(*SealConfig) error
+
+ RecoveryKeySupported() bool
+ RecoveryType() string
+ RecoveryConfig() (*SealConfig, error)
+ SetRecoveryConfig(*SealConfig) error
+ SetRecoveryKey([]byte) error
+ VerifyRecoveryKey([]byte) error
+}
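+
+// Editor's sketch (not upstream code): any Seal implementation is wired up
+// the same way, by handing it the core before use. Assuming an existing
+// *Core value named core:
+//
+//	var s Seal = &DefaultSeal{}
+//	s.SetCore(core)
+//	if err := s.Init(); err != nil {
+//		// handle initialization failure
+//	}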
+
+type DefaultSeal struct {
+ config *SealConfig
+ core *Core
+}
+
+func (d *DefaultSeal) checkCore() error {
+ if d.core == nil {
+ return fmt.Errorf("seal does not have a core set")
+ }
+ return nil
+}
+
+func (d *DefaultSeal) SetCore(core *Core) {
+ d.core = core
+}
+
+func (d *DefaultSeal) Init() error {
+ return nil
+}
+
+func (d *DefaultSeal) Finalize() error {
+ return nil
+}
+
+func (d *DefaultSeal) BarrierType() string {
+ return "shamir"
+}
+
+func (d *DefaultSeal) StoredKeysSupported() bool {
+ return false
+}
+
+func (d *DefaultSeal) RecoveryKeySupported() bool {
+ return false
+}
+
+func (d *DefaultSeal) SetStoredKeys(keys [][]byte) error {
+ return fmt.Errorf("core: stored keys are not supported")
+}
+
+func (d *DefaultSeal) GetStoredKeys() ([][]byte, error) {
+ return nil, fmt.Errorf("core: stored keys are not supported")
+}
+
+func (d *DefaultSeal) BarrierConfig() (*SealConfig, error) {
+ if d.config != nil {
+ return d.config.Clone(), nil
+ }
+
+ if err := d.checkCore(); err != nil {
+ return nil, err
+ }
+
+ // Fetch the core configuration
+ pe, err := d.core.physical.Get(barrierSealConfigPath)
+ if err != nil {
+ d.core.logger.Error("core: failed to read seal configuration", "error", err)
+ return nil, fmt.Errorf("failed to check seal configuration: %v", err)
+ }
+
+ // If the seal configuration is missing, we are not initialized
+ if pe == nil {
+ d.core.logger.Info("core: seal configuration missing, not initialized")
+ return nil, nil
+ }
+
+ var conf SealConfig
+
+ // Decode the barrier entry
+ if err := jsonutil.DecodeJSON(pe.Value, &conf); err != nil {
+ d.core.logger.Error("core: failed to decode seal configuration", "error", err)
+ return nil, fmt.Errorf("failed to decode seal configuration: %v", err)
+ }
+
+ switch conf.Type {
+ // An empty type is only valid because this is the default seal type
+ case "":
+ conf.Type = d.BarrierType()
+ case d.BarrierType():
+ default:
+ d.core.logger.Error("core: barrier seal type does not match loaded type", "barrier_seal_type", conf.Type, "loaded_seal_type", d.BarrierType())
+ return nil, fmt.Errorf("barrier seal type of %s does not match loaded type of %s", conf.Type, d.BarrierType())
+ }
+
+ // Check for a valid seal configuration
+ if err := conf.Validate(); err != nil {
+ d.core.logger.Error("core: invalid seal configuration", "error", err)
+ return nil, fmt.Errorf("seal validation failed: %v", err)
+ }
+
+ d.config = &conf
+ return d.config.Clone(), nil
+}
+
+func (d *DefaultSeal) SetBarrierConfig(config *SealConfig) error {
+ if err := d.checkCore(); err != nil {
+ return err
+ }
+
+ // Provide a way to wipe out the cached value (also prevents actually
+ // saving a nil config)
+ if config == nil {
+ d.config = nil
+ return nil
+ }
+
+ config.Type = d.BarrierType()
+
+ // Encode the seal configuration
+ buf, err := json.Marshal(config)
+ if err != nil {
+ return fmt.Errorf("failed to encode seal configuration: %v", err)
+ }
+
+ // Store the seal configuration
+ pe := &physical.Entry{
+ Key: barrierSealConfigPath,
+ Value: buf,
+ }
+
+ if err := d.core.physical.Put(pe); err != nil {
+ d.core.logger.Error("core: failed to write seal configuration", "error", err)
+ return fmt.Errorf("failed to write seal configuration: %v", err)
+ }
+
+ d.config = config.Clone()
+
+ return nil
+}
+
+func (d *DefaultSeal) RecoveryType() string {
+ return "unsupported"
+}
+
+func (d *DefaultSeal) RecoveryConfig() (*SealConfig, error) {
+ return nil, fmt.Errorf("recovery not supported")
+}
+
+func (d *DefaultSeal) SetRecoveryConfig(config *SealConfig) error {
+ return fmt.Errorf("recovery not supported")
+}
+
+func (d *DefaultSeal) VerifyRecoveryKey([]byte) error {
+ return fmt.Errorf("recovery not supported")
+}
+
+func (d *DefaultSeal) SetRecoveryKey(key []byte) error {
+ return fmt.Errorf("recovery not supported")
+}
+
+// SealConfig is used to describe the seal configuration
+type SealConfig struct {
+ // The type, for sanity checking
+ Type string `json:"type"`
+
+ // SecretShares is the number of shares the secret is split into. This is
+ // the N value of Shamir.
+ SecretShares int `json:"secret_shares"`
+
+ // SecretThreshold is the number of parts required to open the vault. This
+ // is the T value of Shamir.
+ SecretThreshold int `json:"secret_threshold"`
+
+ // PGPKeys is the array of public PGP keys used, if requested, to encrypt
+ // the output unseal tokens. If provided, it sets the value of
+ // SecretShares. Ordering is important.
+ PGPKeys []string `json:"pgp_keys"`
+
+ // Nonce is a nonce generated by Vault used to ensure that when unseal keys
+ // are submitted for a rekey operation, the rekey operation itself is the
+ // one intended. This prevents hijacking of the rekey operation, since it
+ // is unauthenticated.
+ Nonce string `json:"nonce"`
+
+ // Backup indicates whether or not a backup of PGP-encrypted unseal keys
+ // should be stored at coreUnsealKeysBackupPath after successful rekeying.
+ Backup bool `json:"backup"`
+
+ // How many keys to store, for seals that support storage.
+ StoredShares int `json:"stored_shares"`
+}
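+
+// For illustration (editor's note), the JSON tags above serialize a typical
+// Shamir configuration as:
+//
+//	{"type":"shamir","secret_shares":5,"secret_threshold":3,
+//	 "pgp_keys":null,"nonce":"","backup":false,"stored_shares":0}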
+
+// Validate is used to sanity check the seal configuration
+func (s *SealConfig) Validate() error {
+ if s.SecretShares < 1 {
+ return fmt.Errorf("shares must be at least one")
+ }
+ if s.SecretThreshold < 1 {
+ return fmt.Errorf("threshold must be at least one")
+ }
+ if s.SecretShares > 1 && s.SecretThreshold == 1 {
+ return fmt.Errorf("threshold must be greater than one for multiple shares")
+ }
+ if s.SecretShares > 255 {
+ return fmt.Errorf("shares must be less than 256")
+ }
+ if s.SecretThreshold > 255 {
+ return fmt.Errorf("threshold must be less than 256")
+ }
+ if s.SecretThreshold > s.SecretShares {
+ return fmt.Errorf("threshold cannot be larger than shares")
+ }
+ if s.StoredShares > s.SecretShares {
+ return fmt.Errorf("stored keys cannot be larger than shares")
+ }
+ if len(s.PGPKeys) > 0 && len(s.PGPKeys) != s.SecretShares-s.StoredShares {
+ return fmt.Errorf("count mismatch between number of provided PGP keys and number of shares")
+ }
+ if len(s.PGPKeys) > 0 {
+ for _, keystring := range s.PGPKeys {
+ data, err := base64.StdEncoding.DecodeString(keystring)
+ if err != nil {
+ return fmt.Errorf("Error decoding given PGP key: %s", err)
+ }
+ _, err = openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data)))
+ if err != nil {
+ return fmt.Errorf("Error parsing given PGP key: %s", err)
+ }
+ }
+ }
+ return nil
+}
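+
+// For example (editor's sketch), a minimal configuration that passes
+// Validate:
+//
+//	cfg := &SealConfig{SecretShares: 5, SecretThreshold: 3}
+//	if err := cfg.Validate(); err != nil {
+//		// unreachable for these values
+//	}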
+
+func (s *SealConfig) Clone() *SealConfig {
+ ret := &SealConfig{
+ Type: s.Type,
+ SecretShares: s.SecretShares,
+ SecretThreshold: s.SecretThreshold,
+ Nonce: s.Nonce,
+ Backup: s.Backup,
+ StoredShares: s.StoredShares,
+ }
+ if len(s.PGPKeys) > 0 {
+ ret.PGPKeys = make([]string, len(s.PGPKeys))
+ copy(ret.PGPKeys, s.PGPKeys)
+ }
+ return ret
+}
+
+type SealAccess struct {
+ seal Seal
+}
+
+func (s *SealAccess) SetSeal(seal Seal) {
+ s.seal = seal
+}
+
+func (s *SealAccess) StoredKeysSupported() bool {
+ return s.seal.StoredKeysSupported()
+}
+
+func (s *SealAccess) BarrierConfig() (*SealConfig, error) {
+ return s.seal.BarrierConfig()
+}
+
+func (s *SealAccess) RecoveryKeySupported() bool {
+ return s.seal.RecoveryKeySupported()
+}
+
+func (s *SealAccess) RecoveryConfig() (*SealConfig, error) {
+ return s.seal.RecoveryConfig()
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/seal_test.go b/vendor/github.com/hashicorp/vault/vault/seal_test.go
new file mode 100644
index 0000000..5d46fe3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/seal_test.go
@@ -0,0 +1,42 @@
+package vault
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestDefaultSeal_Config(t *testing.T) {
+ bc, _ := TestSealDefConfigs()
+ // Change these to non-default values to ensure we are seeing the real
+ // config we set
+ bc.SecretShares = 4
+ bc.SecretThreshold = 2
+
+ core, _, _ := TestCoreUnsealed(t)
+
+ defSeal := &DefaultSeal{}
+ defSeal.SetCore(core)
+ err := defSeal.SetBarrierConfig(bc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ newBc, err := defSeal.BarrierConfig()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(*bc, *newBc) {
+ t.Fatal("config mismatch")
+ }
+
+ // Now, test without the benefit of the cached value in the seal
+ defSeal = &DefaultSeal{}
+ defSeal.SetCore(core)
+ newBc, err = defSeal.BarrierConfig()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(*bc, *newBc) {
+ t.Fatal("config mismatch")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/seal_testing.go b/vendor/github.com/hashicorp/vault/vault/seal_testing.go
new file mode 100644
index 0000000..f74b140
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/seal_testing.go
@@ -0,0 +1,150 @@
+package vault
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+)
+
+var (
+ TestCoreUnsealedWithConfigs = testCoreUnsealedWithConfigs
+ TestSealDefConfigs = testSealDefConfigs
+)
+
+type TestSeal struct {
+ defseal *DefaultSeal
+ barrierKeys [][]byte
+ recoveryKey []byte
+ recoveryConfig *SealConfig
+ storedKeysDisabled bool
+ recoveryKeysDisabled bool
+}
+
+func newTestSeal(t *testing.T) Seal {
+ return &TestSeal{}
+}
+
+func (d *TestSeal) checkCore() error {
+ if d.defseal.core == nil {
+ return fmt.Errorf("seal does not have a core set")
+ }
+ return nil
+}
+
+func (d *TestSeal) SetCore(core *Core) {
+ d.defseal = &DefaultSeal{}
+ d.defseal.core = core
+}
+
+func (d *TestSeal) Init() error {
+ d.barrierKeys = [][]byte{}
+ return d.defseal.Init()
+}
+
+func (d *TestSeal) Finalize() error {
+ return d.defseal.Finalize()
+}
+
+func (d *TestSeal) BarrierType() string {
+ return "shamir"
+}
+
+func (d *TestSeal) StoredKeysSupported() bool {
+ return !d.storedKeysDisabled
+}
+
+func (d *TestSeal) RecoveryKeySupported() bool {
+ return !d.recoveryKeysDisabled
+}
+
+func (d *TestSeal) SetStoredKeys(keys [][]byte) error {
+ d.barrierKeys = keys
+ return nil
+}
+
+func (d *TestSeal) GetStoredKeys() ([][]byte, error) {
+ return d.barrierKeys, nil
+}
+
+func (d *TestSeal) BarrierConfig() (*SealConfig, error) {
+ return d.defseal.BarrierConfig()
+}
+
+func (d *TestSeal) SetBarrierConfig(config *SealConfig) error {
+ return d.defseal.SetBarrierConfig(config)
+}
+
+func (d *TestSeal) RecoveryType() string {
+ return "shamir"
+}
+
+func (d *TestSeal) RecoveryConfig() (*SealConfig, error) {
+ return d.recoveryConfig, nil
+}
+
+func (d *TestSeal) SetRecoveryConfig(config *SealConfig) error {
+ if config == nil {
+ return nil
+ }
+
+ d.recoveryConfig = config
+ return nil
+}
+
+func (d *TestSeal) VerifyRecoveryKey(key []byte) error {
+ if bytes.Equal(d.recoveryKey, key) {
+ return nil
+ }
+ return fmt.Errorf("not equivalent")
+}
+
+func (d *TestSeal) SetRecoveryKey(key []byte) error {
+ newbuf := bytes.NewBuffer(nil)
+ newbuf.Write(key)
+ d.recoveryKey = newbuf.Bytes()
+ return nil
+}
+
+func testCoreUnsealedWithConfigs(t *testing.T, barrierConf, recoveryConf *SealConfig) (*Core, [][]byte, [][]byte, string) {
+ seal := &TestSeal{}
+ core := TestCoreWithSeal(t, seal)
+ result, err := core.Initialize(&InitParams{
+ BarrierConfig: barrierConf,
+ RecoveryConfig: recoveryConf,
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ err = core.UnsealWithStoredKeys()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if sealed, _ := core.Sealed(); sealed {
+ for _, key := range result.SecretShares {
+ if _, err := core.Unseal(TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ sealed, err = core.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+ }
+
+ return core, result.SecretShares, result.RecoveryShares, result.RootToken
+}
+
+func testSealDefConfigs() (*SealConfig, *SealConfig) {
+ return &SealConfig{
+ SecretShares: 5,
+ SecretThreshold: 3,
+ StoredShares: 2,
+ }, &SealConfig{
+ SecretShares: 5,
+ SecretThreshold: 3,
+ }
+}
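+
+// Typical use of the exported aliases above (editor's sketch):
+//
+//	bc, rc := TestSealDefConfigs()
+//	core, barrierKeys, recoveryKeys, root := TestCoreUnsealedWithConfigs(t, bc, rc)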
diff --git a/vendor/github.com/hashicorp/vault/vault/testing.go b/vendor/github.com/hashicorp/vault/vault/testing.go
new file mode 100644
index 0000000..b567fe7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/testing.go
@@ -0,0 +1,995 @@
+package vault
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "net"
+ "net/http"
+ "os/exec"
+ "testing"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+ "github.com/mitchellh/copystructure"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/net/http2"
+
+ cleanhttp "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/hashicorp/vault/physical"
+)
+
+// This file contains a number of methods that are useful for unit
+// tests within other packages.
+
+const (
+ testSharedPublicKey = `
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9i+hFxZHGo6KblVme4zrAcJstR6I0PTJozW286X4WyvPnkMYDQ5mnhEYC7UWCvjoTWbPEXPX7NjhRtwQTGD67bV+lrxgfyzK1JZbUXK4PwgKJvQD+XyyWYMzDgGSQY61KUSqCxymSm/9NZkPU3ElaQ9xQuTzPpztM4ROfb8f2Yv6/ZESZsTo0MTAkp8Pcy+WkioI/uJ1H7zqs0EA4OMY4aDJRu0UtP4rTVeYNEAuRXdX+eH4aW3KMvhzpFTjMbaJHJXlEeUm2SaX5TNQyTOvghCeQILfYIL/Ca2ij8iwCmulwdV6eQGfd4VDu40PvSnmfoaE38o6HaPnX0kUcnKiT
+`
+ testSharedPrivateKey = `
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAvYvoRcWRxqOim5VZnuM6wHCbLUeiND0yaM1tvOl+Fsrz55DG
+A0OZp4RGAu1Fgr46E1mzxFz1+zY4UbcEExg+u21fpa8YH8sytSWW1FyuD8ICib0A
+/l8slmDMw4BkkGOtSlEqgscpkpv/TWZD1NxJWkPcULk8z6c7TOETn2/H9mL+v2RE
+mbE6NDEwJKfD3MvlpIqCP7idR+86rNBAODjGOGgyUbtFLT+K01XmDRALkV3V/nh+
+GltyjL4c6RU4zG2iRyV5RHlJtkml+UzUMkzr4IQnkCC32CC/wmtoo/IsAprpcHVe
+nkBn3eFQ7uND70p5n6GhN/KOh2j519JFHJyokwIDAQABAoIBAHX7VOvBC3kCN9/x
++aPdup84OE7Z7MvpX6w+WlUhXVugnmsAAVDczhKoUc/WktLLx2huCGhsmKvyVuH+
+MioUiE+vx75gm3qGx5xbtmOfALVMRLopjCnJYf6EaFA0ZeQ+NwowNW7Lu0PHmAU8
+Z3JiX8IwxTz14DU82buDyewO7v+cEr97AnERe3PUcSTDoUXNaoNxjNpEJkKREY6h
+4hAY676RT/GsRcQ8tqe/rnCqPHNd7JGqL+207FK4tJw7daoBjQyijWuB7K5chSal
+oPInylM6b13ASXuOAOT/2uSUBWmFVCZPDCmnZxy2SdnJGbsJAMl7Ma3MUlaGvVI+
+Tfh1aQkCgYEA4JlNOabTb3z42wz6mz+Nz3JRwbawD+PJXOk5JsSnV7DtPtfgkK9y
+6FTQdhnozGWShAvJvc+C4QAihs9AlHXoaBY5bEU7R/8UK/pSqwzam+MmxmhVDV7G
+IMQPV0FteoXTaJSikhZ88mETTegI2mik+zleBpVxvfdhE5TR+lq8Br0CgYEA2AwJ
+CUD5CYUSj09PluR0HHqamWOrJkKPFPwa+5eiTTCzfBBxImYZh7nXnWuoviXC0sg2
+AuvCW+uZ48ygv/D8gcz3j1JfbErKZJuV+TotK9rRtNIF5Ub7qysP7UjyI7zCssVM
+kuDd9LfRXaB/qGAHNkcDA8NxmHW3gpln4CFdSY8CgYANs4xwfercHEWaJ1qKagAe
+rZyrMpffAEhicJ/Z65lB0jtG4CiE6w8ZeUMWUVJQVcnwYD+4YpZbX4S7sJ0B8Ydy
+AhkSr86D/92dKTIt2STk6aCN7gNyQ1vW198PtaAWH1/cO2UHgHOy3ZUt5X/Uwxl9
+cex4flln+1Viumts2GgsCQKBgCJH7psgSyPekK5auFdKEr5+Gc/jB8I/Z3K9+g4X
+5nH3G1PBTCJYLw7hRzw8W/8oALzvddqKzEFHphiGXK94Lqjt/A4q1OdbCrhiE68D
+My21P/dAKB1UYRSs9Y8CNyHCjuZM9jSMJ8vv6vG/SOJPsnVDWVAckAbQDvlTHC9t
+O98zAoGAcbW6uFDkrv0XMCpB9Su3KaNXOR0wzag+WIFQRXCcoTvxVi9iYfUReQPi
+oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F
++B6f4RoPdSXj24JHPg/ioRxjaj094UXJxua2yfkcecGNEuBQHSs=
+-----END RSA PRIVATE KEY-----
+`
+)
+
+// TestCore returns a pure in-memory, uninitialized core for testing.
+func TestCore(t testing.TB) *Core {
+ return TestCoreWithSeal(t, nil)
+}
+
+// TestCoreNewSeal returns an in-memory, uninitialized core with the new seal
+// configuration.
+func TestCoreNewSeal(t testing.TB) *Core {
+ return TestCoreWithSeal(t, &TestSeal{})
+}
+
+// TestCoreWithSeal returns a pure in-memory, uninitialized core with the
+// specified seal for testing.
+func TestCoreWithSeal(t testing.TB, testSeal Seal) *Core {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ physicalBackend := physical.NewInmem(logger)
+
+ conf := testCoreConfig(t, physicalBackend, logger)
+
+ if testSeal != nil {
+ conf.Seal = testSeal
+ }
+
+ c, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ return c
+}
+
+func testCoreConfig(t testing.TB, physicalBackend physical.Backend, logger log.Logger) *CoreConfig {
+ noopAudits := map[string]audit.Factory{
+ "noop": func(config *audit.BackendConfig) (audit.Backend, error) {
+ view := &logical.InmemStorage{}
+ view.Put(&logical.StorageEntry{
+ Key: "salt",
+ Value: []byte("foo"),
+ })
+ var err error
+ config.Salt, err = salt.NewSalt(view, &salt.Config{
+ HMAC: sha256.New,
+ HMACType: "hmac-sha256",
+ })
+ if err != nil {
+ t.Fatalf("error getting new salt: %v", err)
+ }
+ return &noopAudit{
+ Config: config,
+ }, nil
+ },
+ }
+ noopBackends := make(map[string]logical.Factory)
+ noopBackends["noop"] = func(config *logical.BackendConfig) (logical.Backend, error) {
+ b := new(framework.Backend)
+ b.Setup(config)
+ return b, nil
+ }
+ noopBackends["http"] = func(config *logical.BackendConfig) (logical.Backend, error) {
+ return new(rawHTTP), nil
+ }
+ logicalBackends := make(map[string]logical.Factory)
+ for backendName, backendFactory := range noopBackends {
+ logicalBackends[backendName] = backendFactory
+ }
+ logicalBackends["generic"] = LeasedPassthroughBackendFactory
+ for backendName, backendFactory := range testLogicalBackends {
+ logicalBackends[backendName] = backendFactory
+ }
+
+ conf := &CoreConfig{
+ Physical: physicalBackend,
+ AuditBackends: noopAudits,
+ LogicalBackends: logicalBackends,
+ CredentialBackends: noopBackends,
+ DisableMlock: true,
+ Logger: logger,
+ }
+
+ return conf
+}
+
+// TestCoreInit initializes the core with a 3-share, 3-threshold seal
+// configuration and returns the unseal keys and a root token.
+func TestCoreInit(t testing.TB, core *Core) ([][]byte, string) {
+ return TestCoreInitClusterWrapperSetup(t, core, nil, func() (http.Handler, http.Handler) { return nil, nil })
+}
+
+func TestCoreInitClusterWrapperSetup(t testing.TB, core *Core, clusterAddrs []*net.TCPAddr, handlerSetupFunc func() (http.Handler, http.Handler)) ([][]byte, string) {
+ core.SetClusterListenerAddrs(clusterAddrs)
+ core.SetClusterSetupFuncs(handlerSetupFunc)
+ result, err := core.Initialize(&InitParams{
+ BarrierConfig: &SealConfig{
+ SecretShares: 3,
+ SecretThreshold: 3,
+ },
+ RecoveryConfig: &SealConfig{
+ SecretShares: 3,
+ SecretThreshold: 3,
+ },
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ return result.SecretShares, result.RootToken
+}
+
+func TestCoreUnseal(core *Core, key []byte) (bool, error) {
+ core.SetClusterSetupFuncs(func() (http.Handler, http.Handler) { return nil, nil })
+ return core.Unseal(key)
+}
+
+// TestCoreUnsealed returns a pure in-memory core that is already
+// initialized and unsealed.
+func TestCoreUnsealed(t testing.TB) (*Core, [][]byte, string) {
+ core := TestCore(t)
+ keys, token := TestCoreInit(t, core)
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+
+ return core, keys, token
+}
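+
+// Typical use in a test (editor's sketch):
+//
+//	core, keys, root := TestCoreUnsealed(t)
+//	_ = keys // retained in case the test reseals and needs TestCoreUnseal
+//	// root is the root token for authenticating requests against core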
+
+func TestCoreUnsealedBackend(t testing.TB, backend physical.Backend) (*Core, [][]byte, string) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ conf := testCoreConfig(t, backend, logger)
+ conf.Seal = &TestSeal{}
+
+ core, err := NewCore(conf)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ keys, token := TestCoreInit(t, core)
+ for _, key := range keys {
+ if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+
+ return core, keys, token
+}
+
+func testTokenStore(t testing.TB, c *Core) *TokenStore {
+ me := &MountEntry{
+ Table: credentialTableType,
+ Path: "token/",
+ Type: "token",
+ Description: "token based credentials",
+ }
+
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ me.UUID = meUUID
+
+ view := NewBarrierView(c.barrier, credentialBarrierPrefix+me.UUID+"/")
+ sysView := c.mountEntrySysView(me)
+
+ tokenstore, _ := c.newCredentialBackend("token", sysView, view, nil)
+ if err := tokenstore.Initialize(); err != nil {
+ panic(err)
+ }
+ ts := tokenstore.(*TokenStore)
+
+ router := NewRouter()
+ router.Mount(ts, "auth/token/", &MountEntry{Table: credentialTableType, UUID: ""}, ts.view)
+
+ subview := c.systemBarrierView.SubView(expirationSubPath)
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ exp := NewExpirationManager(router, subview, ts, logger)
+ ts.SetExpirationManager(exp)
+
+ return ts
+}
+
+// TestCoreWithTokenStore returns an in-memory core that has a token store
+// mounted, so that logical token functions can be used
+func TestCoreWithTokenStore(t testing.TB) (*Core, *TokenStore, [][]byte, string) {
+ c, keys, root := TestCoreUnsealed(t)
+ ts := testTokenStore(t, c)
+
+ return c, ts, keys, root
+}
+
+// TestCoreWithBackendTokenStore returns a core that has a token store
+// mounted and uses the provided physical backend, so that logical token
+// functions can be used
+func TestCoreWithBackendTokenStore(t testing.TB, backend physical.Backend) (*Core, *TokenStore, [][]byte, string) {
+ c, keys, root := TestCoreUnsealedBackend(t, backend)
+ ts := testTokenStore(t, c)
+
+ return c, ts, keys, root
+}
+
+// TestKeyCopy is a silly little function to just copy the key so that
+// it can be used with Unseal easily.
+func TestKeyCopy(key []byte) []byte {
+ result := make([]byte, len(key))
+ copy(result, key)
+ return result
+}
+
+var testLogicalBackends = map[string]logical.Factory{}
+
+// StartSSHHostTestServer starts a test server that responds to SSH
+// authentication. Used to test the SSH secret backend.
+func StartSSHHostTestServer() (string, error) {
+ pubKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(testSharedPublicKey))
+ if err != nil {
+ return "", fmt.Errorf("Error parsing public key")
+ }
+ serverConfig := &ssh.ServerConfig{
+ PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
+ if bytes.Equal(pubKey.Marshal(), key.Marshal()) {
+ return &ssh.Permissions{}, nil
+ }
+ return nil, fmt.Errorf("key does not match")
+ },
+ }
+ signer, err := ssh.ParsePrivateKey([]byte(testSharedPrivateKey))
+ if err != nil {
+ panic("Error parsing private key")
+ }
+ serverConfig.AddHostKey(signer)
+
+ soc, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return "", fmt.Errorf("Error listening to connection")
+ }
+
+ go func() {
+ for {
+ conn, err := soc.Accept()
+ if err != nil {
+ panic(fmt.Sprintf("Error accepting incoming connection: %s", err))
+ }
+ defer conn.Close()
+ sshConn, chanReqs, _, err := ssh.NewServerConn(conn, serverConfig)
+ if err != nil {
+ panic(fmt.Sprintf("Handshaking error: %v", err))
+ }
+
+ go func() {
+ for chanReq := range chanReqs {
+ go func(chanReq ssh.NewChannel) {
+ if chanReq.ChannelType() != "session" {
+ chanReq.Reject(ssh.UnknownChannelType, "unknown channel type")
+ return
+ }
+
+ ch, requests, err := chanReq.Accept()
+ if err != nil {
+ panic(fmt.Sprintf("Error accepting channel: %s", err))
+ }
+
+ go func(ch ssh.Channel, in <-chan *ssh.Request) {
+ for req := range in {
+ executeServerCommand(ch, req)
+ }
+ }(ch, requests)
+ }(chanReq)
+ }
+ sshConn.Close()
+ }()
+ }
+ }()
+ return soc.Addr().String(), nil
+}
+
+// executeServerCommand runs the command requested by the client on the
+// test server. Used to test the SSH secret backend.
+func executeServerCommand(ch ssh.Channel, req *ssh.Request) {
+ command := string(req.Payload[4:])
+ cmd := exec.Command("/bin/bash", []string{"-c", command}...)
+ req.Reply(true, nil)
+
+ cmd.Stdout = ch
+ cmd.Stderr = ch
+ cmd.Stdin = ch
+
+ err := cmd.Start()
+ if err != nil {
+ panic(fmt.Sprintf("Error starting the command: '%s'", err))
+ }
+
+ go func() {
+ _, err := cmd.Process.Wait()
+ if err != nil {
+ panic(fmt.Sprintf("Error while waiting for command to finish:'%s'", err))
+ }
+ ch.Close()
+ }()
+}
+
+// AddTestLogicalBackend registers a logical backend for the test core. This
+// needs to be invoked before the test core is created.
+func AddTestLogicalBackend(name string, factory logical.Factory) error {
+ if name == "" {
+ return fmt.Errorf("Missing backend name")
+ }
+ if factory == nil {
+ return fmt.Errorf("Missing backend factory function")
+ }
+ testLogicalBackends[name] = factory
+ return nil
+}
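+
+// For example (editor's sketch; the "test-noop" name and factory body are
+// hypothetical, mirroring the noop factory defined earlier in this file):
+//
+//	err := AddTestLogicalBackend("test-noop", func(conf *logical.BackendConfig) (logical.Backend, error) {
+//		b := new(framework.Backend)
+//		b.Setup(conf)
+//		return b, nil
+//	})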
+
+type noopAudit struct {
+ Config *audit.BackendConfig
+}
+
+func (n *noopAudit) GetHash(data string) string {
+ return n.Config.Salt.GetIdentifiedHMAC(data)
+}
+
+func (n *noopAudit) LogRequest(a *logical.Auth, r *logical.Request, e error) error {
+ return nil
+}
+
+func (n *noopAudit) LogResponse(a *logical.Auth, r *logical.Request, re *logical.Response, err error) error {
+ return nil
+}
+
+func (n *noopAudit) Reload() error {
+ return nil
+}
+
+type rawHTTP struct{}
+
+func (n *rawHTTP) HandleRequest(req *logical.Request) (*logical.Response, error) {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ logical.HTTPStatusCode: 200,
+ logical.HTTPContentType: "text/plain",
+ logical.HTTPRawBody: []byte("hello world"),
+ },
+ }, nil
+}
+
+func (n *rawHTTP) HandleExistenceCheck(req *logical.Request) (bool, bool, error) {
+ return false, false, nil
+}
+
+func (n *rawHTTP) SpecialPaths() *logical.Paths {
+ return &logical.Paths{Unauthenticated: []string{"*"}}
+}
+
+func (n *rawHTTP) System() logical.SystemView {
+ return logical.StaticSystemView{
+ DefaultLeaseTTLVal: time.Hour * 24,
+ MaxLeaseTTLVal: time.Hour * 24 * 32,
+ }
+}
+
+func (n *rawHTTP) Cleanup() {
+ // noop
+}
+
+func (n *rawHTTP) Initialize() error {
+ // noop
+ return nil
+}
+
+func (n *rawHTTP) InvalidateKey(string) {
+ // noop
+}
+
+func GenerateRandBytes(length int) ([]byte, error) {
+ if length < 0 {
+ return nil, fmt.Errorf("length must be >= 0")
+ }
+
+ buf := make([]byte, length)
+ if length == 0 {
+ return buf, nil
+ }
+
+ n, err := rand.Read(buf)
+ if err != nil {
+ return nil, err
+ }
+ if n != length {
+ return nil, fmt.Errorf("unable to read %d bytes; only read %d", length, n)
+ }
+
+ return buf, nil
+}
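+
+// For example (editor's sketch), generating a 32-byte random value:
+//
+//	buf, err := GenerateRandBytes(32)
+//	if err != nil {
+//		t.Fatal(err)
+//	}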
+
+func TestWaitActive(t testing.TB, core *Core) {
+ start := time.Now()
+ var standby bool
+ var err error
+ for time.Since(start) < time.Second {
+ standby, err = core.Standby()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !standby {
+ break
+ }
+ }
+ if standby {
+ t.Fatalf("should not be in standby mode")
+ }
+}
+
+type TestListener struct {
+ net.Listener
+ Address *net.TCPAddr
+}
+
+type TestClusterCore struct {
+ *Core
+ Listeners []*TestListener
+ Root string
+ BarrierKeys [][]byte
+ CACertBytes []byte
+ CACert *x509.Certificate
+ TLSConfig *tls.Config
+ ClusterID string
+ Client *api.Client
+}
+
+func (t *TestClusterCore) CloseListeners() {
+ if t.Listeners != nil {
+ for _, ln := range t.Listeners {
+ ln.Close()
+ }
+ }
+ // Give time to actually shut down/clean up before the next test
+ time.Sleep(time.Second)
+}
+
+func TestCluster(t testing.TB, handlers []http.Handler, base *CoreConfig, unsealStandbys bool) []*TestClusterCore {
+ if len(handlers) != 3 {
+ t.Fatal("handlers must be size 3")
+ }
+
+ //
+ // TLS setup
+ //
+ block, _ := pem.Decode([]byte(TestClusterCACert))
+ if block == nil {
+ t.Fatal("error decoding cluster CA cert")
+ }
+ caBytes := block.Bytes
+ caCert, err := x509.ParseCertificate(caBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ serverCert, err := tls.X509KeyPair([]byte(TestClusterServerCert), []byte(TestClusterServerKey))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rootCAs := x509.NewCertPool()
+ rootCAs.AppendCertsFromPEM([]byte(TestClusterCACert))
+ tlsConfig := &tls.Config{
+ Certificates: []tls.Certificate{serverCert},
+ RootCAs: rootCAs,
+ ClientCAs: rootCAs,
+ ClientAuth: tls.VerifyClientCertIfGiven,
+ }
+ tlsConfig.BuildNameToCertificate()
+
+ // Sanity checking
+ block, _ = pem.Decode([]byte(TestClusterServerCert))
+ if block == nil {
+ t.Fatal(err)
+ }
+ parsedServerCert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ chains, err := parsedServerCert.Verify(x509.VerifyOptions{
+ DNSName: "127.0.0.1",
+ Roots: rootCAs,
+ KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(chains) == 0 {
+ t.Fatal("no verified chains for server auth")
+ }
+ chains, err = parsedServerCert.Verify(x509.VerifyOptions{
+ DNSName: "127.0.0.1",
+ Roots: rootCAs,
+ KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(chains) == 0 {
+ t.Fatal("no verified chains for chains auth")
+ }
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ //
+ // Listener setup
+ //
+ ln, err := net.ListenTCP("tcp", &net.TCPAddr{
+ IP: net.ParseIP("127.0.0.1"),
+ Port: 0,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ c1lns := []*TestListener{&TestListener{
+ Listener: tls.NewListener(ln, tlsConfig),
+ Address: ln.Addr().(*net.TCPAddr),
+ },
+ }
+ ln, err = net.ListenTCP("tcp", &net.TCPAddr{
+ IP: net.ParseIP("127.0.0.1"),
+ Port: 0,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ c1lns = append(c1lns, &TestListener{
+ Listener: tls.NewListener(ln, tlsConfig),
+ Address: ln.Addr().(*net.TCPAddr),
+ })
+ server1 := &http.Server{
+ Handler: handlers[0],
+ }
+ if err := http2.ConfigureServer(server1, nil); err != nil {
+ t.Fatal(err)
+ }
+ for _, ln := range c1lns {
+ go server1.Serve(ln)
+ }
+
+ ln, err = net.ListenTCP("tcp", &net.TCPAddr{
+ IP: net.ParseIP("127.0.0.1"),
+ Port: 0,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ c2lns := []*TestListener{&TestListener{
+ Listener: tls.NewListener(ln, tlsConfig),
+ Address: ln.Addr().(*net.TCPAddr),
+ },
+ }
+ server2 := &http.Server{
+ Handler: handlers[1],
+ }
+ if err := http2.ConfigureServer(server2, nil); err != nil {
+ t.Fatal(err)
+ }
+ for _, ln := range c2lns {
+ go server2.Serve(ln)
+ }
+
+ ln, err = net.ListenTCP("tcp", &net.TCPAddr{
+ IP: net.ParseIP("127.0.0.1"),
+ Port: 0,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ c3lns := []*TestListener{&TestListener{
+ Listener: tls.NewListener(ln, tlsConfig),
+ Address: ln.Addr().(*net.TCPAddr),
+ },
+ }
+ server3 := &http.Server{
+ Handler: handlers[2],
+ }
+ if err := http2.ConfigureServer(server3, nil); err != nil {
+ t.Fatal(err)
+ }
+ for _, ln := range c3lns {
+ go server3.Serve(ln)
+ }
+
+ // Create three cores with the same physical and different redirect/cluster addrs
+ // N.B.: On OS X, rather than assigning random ports, the OS assigns new
+ // ports to new listeners sequentially. Aside from being a bad idea in a
+ // security sense, it also broke tests that assumed it was OK to just use
+ // the port above the redirect addr. This has now been changed to 10 ports
+ // above, but if we ever do more than three nodes in a cluster it may need
+ // to be bumped.
+ coreConfig := &CoreConfig{
+ LogicalBackends: make(map[string]logical.Factory),
+ CredentialBackends: make(map[string]logical.Factory),
+ AuditBackends: make(map[string]audit.Factory),
+ RedirectAddr: fmt.Sprintf("https://127.0.0.1:%d", c1lns[0].Address.Port),
+ ClusterAddr: fmt.Sprintf("https://127.0.0.1:%d", c1lns[0].Address.Port+10),
+ DisableMlock: true,
+ }
+
+ if base != nil {
+ if base.Physical != nil {
+ coreConfig.Physical = base.Physical
+ }
+
+ if base.HAPhysical != nil {
+ coreConfig.HAPhysical = base.HAPhysical
+ }
+
+ // Used to set something non-working to test fallback
+ switch base.ClusterAddr {
+ case "empty":
+ coreConfig.ClusterAddr = ""
+ case "":
+ default:
+ coreConfig.ClusterAddr = base.ClusterAddr
+ }
+
+ if base.LogicalBackends != nil {
+ for k, v := range base.LogicalBackends {
+ coreConfig.LogicalBackends[k] = v
+ }
+ }
+ if base.CredentialBackends != nil {
+ for k, v := range base.CredentialBackends {
+ coreConfig.CredentialBackends[k] = v
+ }
+ }
+ if base.AuditBackends != nil {
+ for k, v := range base.AuditBackends {
+ coreConfig.AuditBackends[k] = v
+ }
+ }
+ if base.Logger != nil {
+ coreConfig.Logger = base.Logger
+ }
+ }
+
+ if coreConfig.Physical == nil {
+ coreConfig.Physical = physical.NewInmem(logger)
+ }
+ if coreConfig.HAPhysical == nil {
+ coreConfig.HAPhysical = physical.NewInmemHA(logger)
+ }
+
+ c1, err := NewCore(coreConfig)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ coreConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", c2lns[0].Address.Port)
+ if coreConfig.ClusterAddr != "" {
+ coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", c2lns[0].Address.Port+10)
+ }
+ c2, err := NewCore(coreConfig)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ coreConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", c3lns[0].Address.Port)
+ if coreConfig.ClusterAddr != "" {
+ coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", c3lns[0].Address.Port+10)
+ }
+ c3, err := NewCore(coreConfig)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ //
+ // Clustering setup
+ //
+ clusterAddrGen := func(lns []*TestListener) []*net.TCPAddr {
+ ret := make([]*net.TCPAddr, len(lns))
+ for i, ln := range lns {
+ ret[i] = &net.TCPAddr{
+ IP: ln.Address.IP,
+ Port: ln.Address.Port + 10,
+ }
+ }
+ return ret
+ }
+
+ c2.SetClusterListenerAddrs(clusterAddrGen(c2lns))
+ c2.SetClusterSetupFuncs(WrapHandlerForClustering(handlers[1], logger))
+ c3.SetClusterListenerAddrs(clusterAddrGen(c3lns))
+ c3.SetClusterSetupFuncs(WrapHandlerForClustering(handlers[2], logger))
+ keys, root := TestCoreInitClusterWrapperSetup(t, c1, clusterAddrGen(c1lns), WrapHandlerForClustering(handlers[0], logger))
+ for _, key := range keys {
+ if _, err := c1.Unseal(TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Verify unsealed
+ sealed, err := c1.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+
+ TestWaitActive(t, c1)
+
+ if unsealStandbys {
+ for _, key := range keys {
+ if _, err := c2.Unseal(TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+ for _, key := range keys {
+ if _, err := c3.Unseal(TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+
+ // Let them come fully up to standby
+ time.Sleep(2 * time.Second)
+
+ // Ensure cluster connection info is populated
+ isLeader, _, err := c2.Leader()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if isLeader {
+ t.Fatal("c2 should not be leader")
+ }
+ isLeader, _, err = c3.Leader()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if isLeader {
+ t.Fatal("c3 should not be leader")
+ }
+ }
+
+ cluster, err := c1.Cluster()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ getAPIClient := func(port int) *api.Client {
+ transport := cleanhttp.DefaultPooledTransport()
+ transport.TLSClientConfig = tlsConfig
+ client := &http.Client{
+ Transport: transport,
+ CheckRedirect: func(*http.Request, []*http.Request) error {
+ // This can of course be overridden per-test by using its own client
+ return fmt.Errorf("redirects not allowed in these tests")
+ },
+ }
+ config := api.DefaultConfig()
+ config.Address = fmt.Sprintf("https://127.0.0.1:%d", port)
+ config.HttpClient = client
+ apiClient, err := api.NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ apiClient.SetToken(root)
+ return apiClient
+ }
+
+ var ret []*TestClusterCore
+ keyCopies, _ := copystructure.Copy(keys)
+ ret = append(ret, &TestClusterCore{
+ Core: c1,
+ Listeners: c1lns,
+ Root: root,
+ BarrierKeys: keyCopies.([][]byte),
+ CACertBytes: caBytes,
+ CACert: caCert,
+ TLSConfig: tlsConfig,
+ ClusterID: cluster.ID,
+ Client: getAPIClient(c1lns[0].Address.Port),
+ })
+
+ keyCopies, _ = copystructure.Copy(keys)
+ ret = append(ret, &TestClusterCore{
+ Core: c2,
+ Listeners: c2lns,
+ Root: root,
+ BarrierKeys: keyCopies.([][]byte),
+ CACertBytes: caBytes,
+ CACert: caCert,
+ TLSConfig: tlsConfig,
+ ClusterID: cluster.ID,
+ Client: getAPIClient(c2lns[0].Address.Port),
+ })
+
+ keyCopies, _ = copystructure.Copy(keys)
+ ret = append(ret, &TestClusterCore{
+ Core: c3,
+ Listeners: c3lns,
+ Root: root,
+ BarrierKeys: keyCopies.([][]byte),
+ CACertBytes: caBytes,
+ CACert: caCert,
+ TLSConfig: tlsConfig,
+ ClusterID: cluster.ID,
+ Client: getAPIClient(c3lns[0].Address.Port),
+ })
+
+ return ret
+}
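+
+// Typical use (editor's sketch; h1, h2, h3 are hypothetical http.Handlers
+// for the three cores, supplied by the caller):
+//
+//	cores := TestCluster(t, []http.Handler{h1, h2, h3}, nil, true)
+//	defer cores[0].CloseListeners()
+//	client := cores[0].Client // pre-configured api.Client using the root token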
+
+const (
+ TestClusterCACert = `-----BEGIN CERTIFICATE-----
+MIIDPjCCAiagAwIBAgIUfIKsF2VPT7sdFcKOHJH2Ii6K4MwwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwNTQyWhgPMjA2
+NjA0MjAxNjA2MTJaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdS
+xz9hfymuJb+cN8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP
+67HDzVZhGBHlHTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xE
+JsHQPYS9ASe2eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUb
+cCcIZyk4QVFZ1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SY
+WrCONRw61A5Zwx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABo4GBMH8w
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOuKvPiU
+G06iHkRXAOeMiUdBfHFyMB8GA1UdIwQYMBaAFOuKvPiUG06iHkRXAOeMiUdBfHFy
+MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB
+AQBcN/UdAMzc7UjRdnIpZvO+5keBGhL/vjltnGM1dMWYHa60Y5oh7UIXF+P1RdNW
+n7g80lOyvkSR15/r1rDkqOK8/4oruXU31EcwGhDOC4hU6yMUy4ltV/nBoodHBXNh
+MfKiXeOstH1vdI6G0P6W93Bcww6RyV1KH6sT2dbETCw+iq2VN9CrruGIWzd67UT/
+spe/kYttr3UYVV3O9kqgffVVgVXg/JoRZ3J7Hy2UEXfh9UtWNanDlRuXaZgE9s/d
+CpA30CHpNXvKeyNeW2ktv+2nAbSpvNW+e6MecBCTBIoDSkgU8ShbrzmDKVwNN66Q
+5gn6KxUPBKHEtNzs5DgGM7nq
+-----END CERTIFICATE-----`
+
+ TestClusterCAKey = `-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdSxz9hfymuJb+c
+N8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP67HDzVZhGBHl
+HTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xEJsHQPYS9ASe2
+eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUbcCcIZyk4QVFZ
+1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SYWrCONRw61A5Z
+wx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABAoIBAG3bCo7ljMQb6tel
+CAUjL5Ilqz5a9ebOsONABRYLOclq4ePbatxawdJF7/sSLwZxKkIJnZtvr2Hkubxg
+eOO8KC0YbVS9u39Rjc2QfobxHfsojpbWSuCJl+pvwinbkiUAUxXR7S/PtCPJKat/
+fGdYCiMQ/tqnynh4vR4+/d5o12c0KuuQ22/MdEf3GOadUamRXS1ET9iJWqla1pJW
+TmzrlkGAEnR5PPO2RMxbnZCYmj3dArxWAnB57W+bWYla0DstkDKtwg2j2ikNZpXB
+nkZJJpxR76IYD1GxfwftqAKxujKcyfqB0dIKCJ0UmfOkauNWjexroNLwaAOC3Nud
+XIxppAECgYEA1wJ9EH6A6CrSjdzUocF9LtQy1LCDHbdiQFHxM5/zZqIxraJZ8Gzh
+Q0d8JeOjwPdG4zL9pHcWS7+x64Wmfn0+Qfh6/47Vy3v90PIL0AeZYshrVZyJ/s6X
+YkgFK80KEuWtacqIZ1K2UJyCw81u/ynIl2doRsIbgkbNeN0opjmqVTMCgYEA3CkW
+2fETWK1LvmgKFjG1TjOotVRIOUfy4iN0kznPm6DK2PgTF5DX5RfktlmA8i8WPmB7
+YFOEdAWHf+RtoM/URa7EAGZncCWe6uggAcWqznTS619BJ63OmncpSWov5Byg90gJ
+48qIMY4wDjE85ypz1bmBc2Iph974dtWeDtB7dsECgYAyKZh4EquMfwEkq9LH8lZ8
+aHF7gbr1YeWAUB3QB49H8KtacTg+iYh8o97pEBUSXh6hvzHB/y6qeYzPAB16AUpX
+Jdu8Z9ylXsY2y2HKJRu6GjxAewcO9bAH8/mQ4INrKT6uIdx1Dq0OXZV8jR9KVLtB
+55RCfeLhIBesDR0Auw9sVQKBgB0xTZhkgP43LF35Ca1btgDClNJGdLUztx8JOIH1
+HnQyY/NVIaL0T8xO2MLdJ131pGts+68QI/YGbaslrOuv4yPCQrcS3RBfzKy1Ttkt
+TrLFhtoy7T7HqyeMOWtEq0kCCs3/PWB5EIoRoomfOcYlOOrUCDg2ge9EP4nyVVz9
+hAGBAoGBAJXw/ufevxpBJJMSyULmVWYr34GwLC1OhSE6AVVt9JkIYnc5L4xBKTHP
+QNKKJLmFmMsEqfxHUNWmpiHkm2E0p37Zehui3kywo+A4ybHPTua70ZWQfZhKxLUr
+PvJa8JmwiCM7kO8zjOv+edY1mMWrbjAZH1YUbfcTHmST7S8vp0F3
+-----END RSA PRIVATE KEY-----`
+
+ TestClusterServerCert = `-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIUBLqh6ctGWVDUxFhxJX7m6S/bnrcwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwOTI2WhgPMjA2
+NjA0MjAxNTA5NTZaMBsxGTAXBgNVBAMTEGNlcnQubXl2YXVsdC5jb20wggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDY3gPB29kkdbu0mPO6J0efagQhSiXB
+9OyDuLf5sMk6CVDWVWal5hISkyBmw/lXgF7qC2XFKivpJOrcGQd5Ep9otBqyJLzI
+b0IWdXuPIrVnXDwcdWr86ybX2iC42zKWfbXgjzGijeAVpl0UJLKBj+fk5q6NvkRL
+5FUL6TRV7Krn9mrmnrV9J5IqV15pTd9W2aVJ6IqWvIPCACtZKulqWn4707uy2X2W
+1Stq/5qnp1pDshiGk1VPyxCwQ6yw3iEcgecbYo3vQfhWcv7Q8LpSIM9ZYpXu6OmF
++czqRZS9gERl+wipmmrN1MdYVrTuQem21C/PNZ4jo4XUk1SFx6JrcA+lAgMBAAGj
+gfUwgfIwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSe
+Cl9WV3BjGCwmS/KrDSLRjfwyqjAfBgNVHSMEGDAWgBTrirz4lBtOoh5EVwDnjIlH
+QXxxcjA7BggrBgEFBQcBAQQvMC0wKwYIKwYBBQUHMAKGH2h0dHA6Ly8xMjcuMC4w
+LjE6ODIwMC92MS9wa2kvY2EwIQYDVR0RBBowGIIQY2VydC5teXZhdWx0LmNvbYcE
+fwAAATAxBgNVHR8EKjAoMCagJKAihiBodHRwOi8vMTI3LjAuMC4xOjgyMDAvdjEv
+cGtpL2NybDANBgkqhkiG9w0BAQsFAAOCAQEAWGholPN8buDYwKbUiDavbzjsxUIX
+lU4MxEqOHw7CD3qIYIauPboLvB9EldBQwhgOOy607Yvdg3rtyYwyBFwPhHo/hK3Z
+6mn4hc6TF2V+AUdHBvGzp2dbYLeo8noVoWbQ/lBulggwlIHNNF6+a3kALqsqk1Ch
+f/hzsjFnDhAlNcYFgG8TgfE2lE/FckvejPqBffo7Q3I+wVAw0buqiz5QL81NOT+D
+Y2S9LLKLRaCsWo9wRU1Az4Rhd7vK5SEMh16jJ82GyEODWPvuxOTI1MnzfnbWyLYe
+TTp6YBjGMVf1I6NEcWNur7U17uIOiQjMZ9krNvoMJ1A/cxCoZ98QHgcIPg==
+-----END CERTIFICATE-----`
+
+ TestClusterServerKey = `-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA2N4DwdvZJHW7tJjzuidHn2oEIUolwfTsg7i3+bDJOglQ1lVm
+peYSEpMgZsP5V4Be6gtlxSor6STq3BkHeRKfaLQasiS8yG9CFnV7jyK1Z1w8HHVq
+/Osm19oguNsyln214I8xoo3gFaZdFCSygY/n5Oaujb5ES+RVC+k0Veyq5/Zq5p61
+fSeSKldeaU3fVtmlSeiKlryDwgArWSrpalp+O9O7stl9ltUrav+ap6daQ7IYhpNV
+T8sQsEOssN4hHIHnG2KN70H4VnL+0PC6UiDPWWKV7ujphfnM6kWUvYBEZfsIqZpq
+zdTHWFa07kHpttQvzzWeI6OF1JNUhceia3APpQIDAQABAoIBAQCH3vEzr+3nreug
+RoPNCXcSJXXY9X+aeT0FeeGqClzIg7Wl03OwVOjVwl/2gqnhbIgK0oE8eiNwurR6
+mSPZcxV0oAJpwiKU4T/imlCDaReGXn86xUX2l82KRxthNdQH/VLKEmzij0jpx4Vh
+bWx5SBPdkbmjDKX1dmTiRYWIn/KjyNPvNvmtwdi8Qluhf4eJcNEUr2BtblnGOmfL
+FdSu+brPJozpoQ1QdDnbAQRgqnh7Shl0tT85whQi0uquqIj1gEOGVjmBvDDnL3GV
+WOENTKqsmIIoEzdZrql1pfmYTk7WNaD92bfpN128j8BF7RmAV4/DphH0pvK05y9m
+tmRhyHGxAoGBAOV2BBocsm6xup575VqmFN+EnIOiTn+haOvfdnVsyQHnth63fOQx
+PNtMpTPR1OMKGpJ13e2bV0IgcYRsRkScVkUtoa/17VIgqZXffnJJ0A/HT67uKBq3
+8o7RrtyK5N20otw0lZHyqOPhyCdpSsurDhNON1kPVJVYY4N1RiIxfut/AoGBAPHz
+HfsJ5ZkyELE9N/r4fce04lprxWH+mQGK0/PfjS9caXPhj/r5ZkVMvzWesF3mmnY8
+goE5S35TuTvV1+6rKGizwlCFAQlyXJiFpOryNWpLwCmDDSzLcm+sToAlML3tMgWU
+jM3dWHx3C93c3ft4rSWJaUYI9JbHsMzDW6Yh+GbbAoGBANIbKwxh5Hx5XwEJP2yu
+kIROYCYkMy6otHLujgBdmPyWl+suZjxoXWoMl2SIqR8vPD+Jj6mmyNJy9J6lqf3f
+DRuQ+fEuBZ1i7QWfvJ+XuN0JyovJ5Iz6jC58D1pAD+p2IX3y5FXcVQs8zVJRFjzB
+p0TEJOf2oqORaKWRd6ONoMKvAoGALKu6aVMWdQZtVov6/fdLIcgf0pn7Q3CCR2qe
+X3Ry2L+zKJYIw0mwvDLDSt8VqQCenB3n6nvtmFFU7ds5lvM67rnhsoQcAOaAehiS
+rl4xxoJd5Ewx7odRhZTGmZpEOYzFo4odxRSM9c30/u18fqV1Mm0AZtHYds4/sk6P
+aUj0V+kCgYBMpGrJk8RSez5g0XZ35HfpI4ENoWbiwB59FIpWsLl2LADEh29eC455
+t9Muq7MprBVBHQo11TMLLFxDIjkuMho/gcKgpYXCt0LfiNm8EZehvLJUXH+3WqUx
+we6ywrbFCs6LaxaOCtTiLsN+GbZCatITL0UJaeBmTAbiw0KQjUuZPQ==
+-----END RSA PRIVATE KEY-----`
+)
diff --git a/vendor/github.com/hashicorp/vault/vault/token_store.go b/vendor/github.com/hashicorp/vault/vault/token_store.go
new file mode 100644
index 0000000..46614ed
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/token_store.go
@@ -0,0 +1,2285 @@
+package vault
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/armon/go-metrics"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/locksutil"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/mitchellh/mapstructure"
+)
+
+const (
+ // lookupPrefix is the prefix used to store tokens for their
+ // primary, ID-based index
+ lookupPrefix = "id/"
+
+ // accessorPrefix is the prefix used to store the index from
+ // Accessor to Token ID
+ accessorPrefix = "accessor/"
+
+ // parentPrefix is the prefix used to store tokens for their
+ // secondary, parent-based index
+ parentPrefix = "parent/"
+
+ // tokenSubPath is the sub-path used for the token store
+ // view. This is nested under the system view.
+ tokenSubPath = "token/"
+
+ // rolesPrefix is the prefix used to store role information
+ rolesPrefix = "roles/"
+
+ // tokenRevocationDeferred indicates that the token should not be used
+ // again but is currently fulfilling its final use
+ tokenRevocationDeferred = -1
+
+ // tokenRevocationInProgress indicates that revocation of that token/its
+ // leases is ongoing
+ tokenRevocationInProgress = -2
+
+ // tokenRevocationFailed indicates that revocation failed; the entry is
+ // kept around so that when the tidy function is run it can be tried
+ // again (or when the revocation function is run again), but all other uses
+ // will report the token invalid
+ tokenRevocationFailed = -3
+)
+
+var (
+ // displayNameSanitize is used to sanitize a display name given to a token.
+ displayNameSanitize = regexp.MustCompile("[^a-zA-Z0-9-]")
+
+ // pathSuffixSanitize is used to ensure a path suffix in a role is valid.
+ pathSuffixSanitize = regexp.MustCompile("\\w[\\w-.]+\\w")
+
+ destroyCubbyhole = func(ts *TokenStore, saltedID string) error {
+ if ts.cubbyholeBackend == nil {
+ // Should only ever happen in testing
+ return nil
+ }
+ return ts.cubbyholeBackend.revoke(salt.SaltID(ts.cubbyholeBackend.saltUUID, saltedID, salt.SHA1Hash))
+ }
+)
+
+// TokenStore is used to manage client tokens. Tokens are used for
+// clients to authenticate, and each token is mapped to an applicable
+// set of policies used for authorization.
+type TokenStore struct {
+ *framework.Backend
+
+ view *BarrierView
+ salt *salt.Salt
+
+ expiration *ExpirationManager
+
+ cubbyholeBackend *CubbyholeBackend
+
+ policyLookupFunc func(string) (*Policy, error)
+
+ tokenLocks []*locksutil.LockEntry
+
+ cubbyholeDestroyer func(*TokenStore, string) error
+}
+
+// NewTokenStore is used to construct a token store that is
+// backed by the given barrier view.
+func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error) {
+ // Create a sub-view
+ view := c.systemBarrierView.SubView(tokenSubPath)
+
+ // Initialize the store
+ t := &TokenStore{
+ view: view,
+ cubbyholeDestroyer: destroyCubbyhole,
+ }
+
+ if c.policyStore != nil {
+ t.policyLookupFunc = c.policyStore.GetPolicy
+ }
+
+ t.tokenLocks = locksutil.CreateLocks()
+
+ // Setup the framework endpoints
+ t.Backend = &framework.Backend{
+ AuthRenew: t.authRenew,
+
+ PathsSpecial: &logical.Paths{
+ Root: []string{
+ "revoke-orphan/*",
+ "accessors*",
+ },
+
+ // Most token store items are local since tokens are local, but a
+ // notable exception is roles
+ LocalStorage: []string{
+ lookupPrefix,
+ accessorPrefix,
+ parentPrefix,
+ "salt",
+ },
+ },
+
+ Paths: []*framework.Path{
+ &framework.Path{
+ Pattern: "roles/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: t.tokenStoreRoleList,
+ },
+
+ HelpSynopsis: tokenListRolesHelp,
+ HelpDescription: tokenListRolesHelp,
+ },
+
+ &framework.Path{
+ Pattern: "accessors/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: t.tokenStoreAccessorList,
+ },
+
+ HelpSynopsis: tokenListAccessorsHelp,
+ HelpDescription: tokenListAccessorsHelp,
+ },
+
+ &framework.Path{
+ Pattern: "roles/" + framework.GenericNameRegex("role_name"),
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role",
+ },
+
+ "allowed_policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: tokenAllowedPoliciesHelp,
+ },
+
+ "disallowed_policies": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: tokenDisallowedPoliciesHelp,
+ },
+
+ "orphan": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: false,
+ Description: tokenOrphanHelp,
+ },
+
+ "period": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 0,
+ Description: tokenPeriodHelp,
+ },
+
+ "path_suffix": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Default: "",
+ Description: tokenPathSuffixHelp + pathSuffixSanitize.String(),
+ },
+
+ "explicit_max_ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 0,
+ Description: tokenExplicitMaxTTLHelp,
+ },
+
+ "renewable": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: tokenRenewableHelp,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: t.tokenStoreRoleRead,
+ logical.CreateOperation: t.tokenStoreRoleCreateUpdate,
+ logical.UpdateOperation: t.tokenStoreRoleCreateUpdate,
+ logical.DeleteOperation: t.tokenStoreRoleDelete,
+ },
+
+ ExistenceCheck: t.tokenStoreRoleExistenceCheck,
+
+ HelpSynopsis: tokenPathRolesHelp,
+ HelpDescription: tokenPathRolesHelp,
+ },
+
+ &framework.Path{
+ Pattern: "create-orphan$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: t.handleCreateOrphan,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenCreateOrphanHelp),
+ HelpDescription: strings.TrimSpace(tokenCreateOrphanHelp),
+ },
+
+ &framework.Path{
+ Pattern: "create/" + framework.GenericNameRegex("role_name"),
+
+ Fields: map[string]*framework.FieldSchema{
+ "role_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: t.handleCreateAgainstRole,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenCreateRoleHelp),
+ HelpDescription: strings.TrimSpace(tokenCreateRoleHelp),
+ },
+
+ &framework.Path{
+ Pattern: "create$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: t.handleCreate,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenCreateHelp),
+ HelpDescription: strings.TrimSpace(tokenCreateHelp),
+ },
+
+ &framework.Path{
+ Pattern: "lookup" + framework.OptionalParamRegex("urltoken"),
+
+ Fields: map[string]*framework.FieldSchema{
+ "urltoken": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token to lookup (URL parameter)",
+ },
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token to lookup (POST request body)",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: t.handleLookup,
+ logical.UpdateOperation: t.handleLookup,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenLookupHelp),
+ HelpDescription: strings.TrimSpace(tokenLookupHelp),
+ },
+
+ &framework.Path{
+ Pattern: "lookup-accessor" + framework.OptionalParamRegex("urlaccessor"),
+
+ Fields: map[string]*framework.FieldSchema{
+ "urlaccessor": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Accessor of the token to look up (URL parameter)",
+ },
+ "accessor": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Accessor of the token to look up (request body)",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: t.handleUpdateLookupAccessor,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenLookupAccessorHelp),
+ HelpDescription: strings.TrimSpace(tokenLookupAccessorHelp),
+ },
+
+ &framework.Path{
+ Pattern: "lookup-self$",
+
+ Fields: map[string]*framework.FieldSchema{
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token to look up (unused, does not need to be set)",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: t.handleLookupSelf,
+ logical.ReadOperation: t.handleLookupSelf,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenLookupHelp),
+ HelpDescription: strings.TrimSpace(tokenLookupHelp),
+ },
+
+ &framework.Path{
+ Pattern: "revoke-accessor" + framework.OptionalParamRegex("urlaccessor"),
+
+ Fields: map[string]*framework.FieldSchema{
+ "urlaccessor": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Accessor of the token (URL parameter)",
+ },
+ "accessor": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Accessor of the token (request body)",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: t.handleUpdateRevokeAccessor,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenRevokeAccessorHelp),
+ HelpDescription: strings.TrimSpace(tokenRevokeAccessorHelp),
+ },
+
+ &framework.Path{
+ Pattern: "revoke-self$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: t.handleRevokeSelf,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenRevokeSelfHelp),
+ HelpDescription: strings.TrimSpace(tokenRevokeSelfHelp),
+ },
+
+ &framework.Path{
+ Pattern: "revoke" + framework.OptionalParamRegex("urltoken"),
+
+ Fields: map[string]*framework.FieldSchema{
+ "urltoken": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token to revoke (URL parameter)",
+ },
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token to revoke (request body)",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: t.handleRevokeTree,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenRevokeHelp),
+ HelpDescription: strings.TrimSpace(tokenRevokeHelp),
+ },
+
+ &framework.Path{
+ Pattern: "revoke-orphan" + framework.OptionalParamRegex("urltoken"),
+
+ Fields: map[string]*framework.FieldSchema{
+ "urltoken": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token to revoke (URL parameter)",
+ },
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token to revoke (request body)",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: t.handleRevokeOrphan,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenRevokeOrphanHelp),
+ HelpDescription: strings.TrimSpace(tokenRevokeOrphanHelp),
+ },
+
+ &framework.Path{
+ Pattern: "renew-self$",
+
+ Fields: map[string]*framework.FieldSchema{
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token to renew (unused, does not need to be set)",
+ },
+ "increment": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 0,
+ Description: "The desired increment in seconds to the token expiration",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: t.handleRenewSelf,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenRenewSelfHelp),
+ HelpDescription: strings.TrimSpace(tokenRenewSelfHelp),
+ },
+
+ &framework.Path{
+ Pattern: "renew" + framework.OptionalParamRegex("urltoken"),
+
+ Fields: map[string]*framework.FieldSchema{
+ "urltoken": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token to renew (URL parameter)",
+ },
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token to renew (request body)",
+ },
+ "increment": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Default: 0,
+ Description: "The desired increment in seconds to the token expiration",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: t.handleRenew,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenRenewHelp),
+ HelpDescription: strings.TrimSpace(tokenRenewHelp),
+ },
+
+ &framework.Path{
+ Pattern: "tidy$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: t.handleTidy,
+ },
+
+ HelpSynopsis: strings.TrimSpace(tokenTidyHelp),
+ HelpDescription: strings.TrimSpace(tokenTidyDesc),
+ },
+ },
+
+ Init: t.Initialize,
+ }
+
+ t.Backend.Setup(config)
+
+ return t, nil
+}
+
+func (ts *TokenStore) Initialize() error {
+ // Set up the salt
+ salt, err := salt.NewSalt(ts.view, &salt.Config{
+ HashFunc: salt.SHA1Hash,
+ })
+ if err != nil {
+ return err
+ }
+ ts.salt = salt
+
+ return nil
+}
+
+// TokenEntry is used to represent a given token
+type TokenEntry struct {
+ // ID of this entry, generally a random UUID
+ ID string `json:"id" mapstructure:"id" structs:"id"`
+
+ // Accessor for this token, a random UUID
+ Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor"`
+
+ // Parent token, used for revocation trees
+ Parent string `json:"parent" mapstructure:"parent" structs:"parent"`
+
+ // Which named policies should be used
+ Policies []string `json:"policies" mapstructure:"policies" structs:"policies"`
+
+ // Used for audit trails, this is something like "auth/user/login"
+ Path string `json:"path" mapstructure:"path" structs:"path"`
+
+ // Used for auditing. This could include things like "source", "user", "ip"
+ Meta map[string]string `json:"meta" mapstructure:"meta" structs:"meta"`
+
+ // Used for operators to be able to associate with the source
+ DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"`
+
+ // Used to restrict the number of uses (zero is unlimited). This is to
+ // support one-time-tokens (generalized). There are a few special values:
+ // if it's -1 it has run through its use counts and is executing its final
+ // use; if it's -2 it is tainted, which means revocation is currently
+ // running on it; and if it's -3 it's also tainted but revocation
+ // previously ran and failed, so this hints the tidy function to try it
+ // again.
+ NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`
+
+ // Time of token creation
+ CreationTime int64 `json:"creation_time" mapstructure:"creation_time" structs:"creation_time"`
+
+ // Duration set when token was created
+ TTL time.Duration `json:"ttl" mapstructure:"ttl" structs:"ttl"`
+
+ // Explicit maximum TTL on the token
+ ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl"`
+
+ // If set, the role that was used for parameters at creation time
+ Role string `json:"role" mapstructure:"role" structs:"role"`
+
+ // If set, the period of the token. This is only used when created directly
+ // through the create endpoint; periods managed by roles or other auth
+ // backends are subject to those renewal rules.
+ Period time.Duration `json:"period" mapstructure:"period" structs:"period"`
+
+ // These are the deprecated fields
+ DisplayNameDeprecated string `json:"DisplayName" mapstructure:"DisplayName" structs:"DisplayName"`
+ NumUsesDeprecated int `json:"NumUses" mapstructure:"NumUses" structs:"NumUses"`
+ CreationTimeDeprecated int64 `json:"CreationTime" mapstructure:"CreationTime" structs:"CreationTime"`
+ ExplicitMaxTTLDeprecated time.Duration `json:"ExplicitMaxTTL" mapstructure:"ExplicitMaxTTL" structs:"ExplicitMaxTTL"`
+}
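+
+// Illustrative sketch (not part of the original source): a helper naming the
+// NumUses sentinel states documented above. It assumes the -2/-3 constants
+// referenced elsewhere in this file carry exactly those values.
+func describeNumUses(n int) string {
+ switch {
+ case n == 0:
+ return "unlimited uses"
+ case n > 0:
+ return "limited uses remaining"
+ case n == -1:
+ return "executing final use; revocation deferred"
+ case n == tokenRevocationInProgress:
+ return "tainted: revocation in progress"
+ case n == tokenRevocationFailed:
+ return "tainted: revocation failed; tidy will retry"
+ default:
+ return "unknown state"
+ }
+}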
+
+// tsRoleEntry contains token store role information
+type tsRoleEntry struct {
+ // The name of the role. Embedded so it can be used for pathing
+ Name string `json:"name" mapstructure:"name" structs:"name"`
+
+ // The policies that creation functions using this role can assign to a token,
+ // escaping or further locking down normal subset checking
+ AllowedPolicies []string `json:"allowed_policies" mapstructure:"allowed_policies" structs:"allowed_policies"`
+
+ // List of policies that are not allowed during token creation using this role
+ DisallowedPolicies []string `json:"disallowed_policies" mapstructure:"disallowed_policies" structs:"disallowed_policies"`
+
+ // If true, tokens created using this role will be orphans
+ Orphan bool `json:"orphan" mapstructure:"orphan" structs:"orphan"`
+
+ // If non-zero, tokens created using this role will be able to be renewed
+ // forever, but will have a fixed renewal period of this value
+ Period time.Duration `json:"period" mapstructure:"period" structs:"period"`
+
+ // If set, a suffix will be set on the token path, making it easier to
+ // revoke using 'revoke-prefix'
+ PathSuffix string `json:"path_suffix" mapstructure:"path_suffix" structs:"path_suffix"`
+
+ // If set, controls whether created tokens are marked as being renewable
+ Renewable bool `json:"renewable" mapstructure:"renewable" structs:"renewable"`
+
+ // If set, the token entry will have an explicit maximum TTL set, rather
+ // than deferring to role/mount values
+ ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl"`
+}
+
+type accessorEntry struct {
+ TokenID string `json:"token_id"`
+ AccessorID string `json:"accessor_id"`
+}
+
+// SetExpirationManager is used to provide the token store with
+// an expiration manager. This is used to manage prefix-based revocation
+// of tokens and to tidy entries when they are removed from the token store.
+func (ts *TokenStore) SetExpirationManager(exp *ExpirationManager) {
+ ts.expiration = exp
+}
+
+// SaltID is used to apply a salt and hash to an ID to make sure it's not reversible
+func (ts *TokenStore) SaltID(id string) string {
+ return ts.salt.SaltID(id)
+}
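+
+// Illustrative sketch (not part of the original source): storage keys in this
+// store are always a prefix plus a salted ID, so raw token IDs and accessors
+// never appear in storage. The prefixes are the ones used throughout this file.
+func exampleStorageKeys(ts *TokenStore, id, accessor string) (primary, accessorIndex string) {
+ primary = lookupPrefix + ts.SaltID(id)
+ accessorIndex = accessorPrefix + ts.SaltID(accessor)
+ return primary, accessorIndex
+}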
+
+// rootToken is used to generate a new token with root privileges and no parent
+func (ts *TokenStore) rootToken() (*TokenEntry, error) {
+ te := &TokenEntry{
+ Policies: []string{"root"},
+ Path: "auth/token/root",
+ DisplayName: "root",
+ CreationTime: time.Now().Unix(),
+ }
+ if err := ts.create(te); err != nil {
+ return nil, err
+ }
+ return te, nil
+}
+
+func (ts *TokenStore) tokenStoreAccessorList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entries, err := ts.view.List(accessorPrefix)
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &logical.Response{}
+
+ ret := make([]string, 0, len(entries))
+ for _, entry := range entries {
+ aEntry, err := ts.lookupBySaltedAccessor(entry)
+ if err != nil {
+ resp.AddWarning("Found an accessor entry that could not be successfully decoded")
+ continue
+ }
+ if aEntry.TokenID == "" {
+ resp.AddWarning(fmt.Sprintf("Found an accessor entry missing a token: %v", aEntry.AccessorID))
+ } else {
+ ret = append(ret, aEntry.AccessorID)
+ }
+ }
+
+ resp.Data = map[string]interface{}{
+ "keys": ret,
+ }
+ return resp, nil
+}
+
+// createAccessor is used to create an identifier for the token ID.
+// A storage index mapping the accessor to the token ID is also created.
+func (ts *TokenStore) createAccessor(entry *TokenEntry) error {
+ defer metrics.MeasureSince([]string{"token", "createAccessor"}, time.Now())
+
+ // Create a random accessor
+ accessorUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ return err
+ }
+ entry.Accessor = accessorUUID
+
+ // Create index entry, mapping the accessor to the token ID
+ path := accessorPrefix + ts.SaltID(entry.Accessor)
+
+ aEntry := &accessorEntry{
+ TokenID: entry.ID,
+ AccessorID: entry.Accessor,
+ }
+ aEntryBytes, err := jsonutil.EncodeJSON(aEntry)
+ if err != nil {
+ return fmt.Errorf("failed to marshal accessor index entry: %v", err)
+ }
+
+ le := &logical.StorageEntry{Key: path, Value: aEntryBytes}
+ if err := ts.view.Put(le); err != nil {
+ return fmt.Errorf("failed to persist accessor index entry: %v", err)
+ }
+ return nil
+}
+
+// create is used to create a new token entry. The entry is assigned
+// a newly generated ID if not provided.
+func (ts *TokenStore) create(entry *TokenEntry) error {
+ defer metrics.MeasureSince([]string{"token", "create"}, time.Now())
+ // Generate an ID if necessary
+ if entry.ID == "" {
+ entryUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ return err
+ }
+ entry.ID = entryUUID
+ }
+
+ entry.Policies = policyutil.SanitizePolicies(entry.Policies, policyutil.DoNotAddDefaultPolicy)
+
+ err := ts.createAccessor(entry)
+ if err != nil {
+ return err
+ }
+
+ return ts.storeCommon(entry, true)
+}
+
+// store is used to store an updated token entry without writing the
+// secondary index.
+func (ts *TokenStore) store(entry *TokenEntry) error {
+ defer metrics.MeasureSince([]string{"token", "store"}, time.Now())
+ return ts.storeCommon(entry, false)
+}
+
+// storeCommon handles the actual storage of an entry, possibly generating
+// secondary indexes
+func (ts *TokenStore) storeCommon(entry *TokenEntry, writeSecondary bool) error {
+ saltedId := ts.SaltID(entry.ID)
+
+ // Marshal the entry
+ enc, err := json.Marshal(entry)
+ if err != nil {
+ return fmt.Errorf("failed to encode entry: %v", err)
+ }
+
+ if writeSecondary {
+ // Write the secondary index if necessary. This is done before the
+ // primary index because we'd rather have a dangling pointer with
+ // a missing primary instead of missing the parent index and potentially
+ // escaping the revocation chain.
+ if entry.Parent != "" {
+ // Ensure the parent exists
+ parent, err := ts.Lookup(entry.Parent)
+ if err != nil {
+ return fmt.Errorf("failed to lookup parent: %v", err)
+ }
+ if parent == nil {
+ return fmt.Errorf("parent token not found")
+ }
+
+ // Create the index entry
+ path := parentPrefix + ts.SaltID(entry.Parent) + "/" + saltedId
+ le := &logical.StorageEntry{Key: path}
+ if err := ts.view.Put(le); err != nil {
+ return fmt.Errorf("failed to persist entry: %v", err)
+ }
+ }
+ }
+
+ // Write the primary ID
+ path := lookupPrefix + saltedId
+ le := &logical.StorageEntry{Key: path, Value: enc}
+ if err := ts.view.Put(le); err != nil {
+ return fmt.Errorf("failed to persist entry: %v", err)
+ }
+ return nil
+}
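+
+// Usage sketch (an assumption about caller intent, not original source):
+// create writes the accessor and parent indexes for a brand-new entry, while
+// store is for in-place updates -- such as the NumUses changes in UseToken
+// below -- that must not duplicate secondary indexes.
+func exampleUpdateEntry(ts *TokenStore, te *TokenEntry, policies []string) error {
+ te.Policies = policies
+ return ts.store(te) // no secondary index writes on update
+}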
+
+// UseToken is used to manage restricted-use tokens and decrement their
+// available uses. It returns a potentially updated entry (or nil if the
+// token has been revoked) and an error if one was encountered. The locking
+// here isn't perfect: other parts of the code may update an entry, but
+// usually not after the entry is already created, so this is pretty good.
+func (ts *TokenStore) UseToken(te *TokenEntry) (*TokenEntry, error) {
+ if te == nil {
+ return nil, fmt.Errorf("invalid token entry provided for use count decrementing")
+ }
+
+ // This case won't be hit with a token with restricted uses because we go
+ // from 1 to -1. So it's a nice optimization to check this without a read
+ // lock.
+ if te.NumUses == 0 {
+ return te, nil
+ }
+
+ lock := locksutil.LockForKey(ts.tokenLocks, te.ID)
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Call lookupSalted instead of Lookup to avoid deadlocking since Lookup grabs a read lock
+ te, err := ts.lookupSalted(ts.SaltID(te.ID), false)
+ if err != nil {
+ return nil, fmt.Errorf("failed to refresh entry: %v", err)
+ }
+ // If it can't be found we shouldn't be trying to use it, so if we get nil
+ // back, it is because it has been revoked in the interim or will be
+ // revoked (NumUses is -1)
+ if te == nil {
+ return nil, fmt.Errorf("token not found or fully used already")
+ }
+
+ // Decrement the count. If this is our last use count, we need to indicate
+ // that this is no longer valid, but revocation is deferred to the end of
+ // the call, so this will make sure that any Lookup that happens doesn't
+ // return an entry. This essentially acts as a write-ahead lock and is
+ // especially useful since revocation can end up (via the expiration
+ // manager revoking children) attempting to acquire the same lock
+ // repeatedly.
+ if te.NumUses == 1 {
+ te.NumUses = -1
+ } else {
+ te.NumUses -= 1
+ }
+
+ err = ts.storeCommon(te, false)
+ if err != nil {
+ return nil, err
+ }
+
+ return te, nil
+}
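+
+// Minimal sketch of the decrement rule implemented above: the final use is
+// recorded as -1 rather than 0, acting as a write-ahead tombstone so Lookup
+// stops returning the entry while deferred revocation runs.
+func nextNumUses(current int) int {
+ if current == 1 {
+ return -1
+ }
+ return current - 1
+}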
+
+func (ts *TokenStore) UseTokenByID(id string) (*TokenEntry, error) {
+ te, err := ts.Lookup(id)
+ if err != nil {
+ return te, err
+ }
+
+ return ts.UseToken(te)
+}
+
+// Lookup is used to find a token given its ID. It acquires a read lock, then calls lookupSalted.
+func (ts *TokenStore) Lookup(id string) (*TokenEntry, error) {
+ defer metrics.MeasureSince([]string{"token", "lookup"}, time.Now())
+ if id == "" {
+ return nil, fmt.Errorf("cannot lookup blank token")
+ }
+
+ lock := locksutil.LockForKey(ts.tokenLocks, id)
+ lock.RLock()
+ defer lock.RUnlock()
+
+ return ts.lookupSalted(ts.SaltID(id), false)
+}
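+
+// Usage sketch: Lookup returns (nil, nil) for a missing or fully-used token,
+// so callers must check both return values, as the handlers later in this
+// file do.
+func exampleLookupUsage(ts *TokenStore, id string) error {
+ te, err := ts.Lookup(id)
+ if err != nil {
+ return err // storage or decoding failure
+ }
+ if te == nil {
+ return fmt.Errorf("token not found") // revoked, tainted, or never existed
+ }
+ return nil
+}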
+
+// lookupSalted is used to find a token given its salted ID. If tainted is
+// true, entries that are in some revocation state (currently indicated by
+// num uses < 0) will be returned anyway.
+func (ts *TokenStore) lookupSalted(saltedId string, tainted bool) (*TokenEntry, error) {
+ // Lookup token
+ path := lookupPrefix + saltedId
+ raw, err := ts.view.Get(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read entry: %v", err)
+ }
+
+ // Bail if not found
+ if raw == nil {
+ return nil, nil
+ }
+
+ // Unmarshal the token
+ entry := new(TokenEntry)
+ if err := jsonutil.DecodeJSON(raw.Value, entry); err != nil {
+ return nil, fmt.Errorf("failed to decode entry: %v", err)
+ }
+
+ // This is a token that is awaiting deferred revocation or tainted
+ if entry.NumUses < 0 && !tainted {
+ return nil, nil
+ }
+
+ persistNeeded := false
+
+ // Upgrade the deprecated fields
+ if entry.DisplayNameDeprecated != "" {
+ if entry.DisplayName == "" {
+ entry.DisplayName = entry.DisplayNameDeprecated
+ }
+ entry.DisplayNameDeprecated = ""
+ persistNeeded = true
+ }
+
+ if entry.CreationTimeDeprecated != 0 {
+ if entry.CreationTime == 0 {
+ entry.CreationTime = entry.CreationTimeDeprecated
+ }
+ entry.CreationTimeDeprecated = 0
+ persistNeeded = true
+ }
+
+ if entry.ExplicitMaxTTLDeprecated != 0 {
+ if entry.ExplicitMaxTTL == 0 {
+ entry.ExplicitMaxTTL = entry.ExplicitMaxTTLDeprecated
+ }
+ entry.ExplicitMaxTTLDeprecated = 0
+ persistNeeded = true
+ }
+
+ if entry.NumUsesDeprecated != 0 {
+ if entry.NumUses == 0 || entry.NumUsesDeprecated < entry.NumUses {
+ entry.NumUses = entry.NumUsesDeprecated
+ }
+ entry.NumUsesDeprecated = 0
+ persistNeeded = true
+ }
+
+ // If fields are getting upgraded, store the changes
+ if persistNeeded {
+ if err := ts.storeCommon(entry, false); err != nil {
+ return nil, fmt.Errorf("failed to persist token upgrade: %v", err)
+ }
+ }
+
+ return entry, nil
+}
+
+// Revoke is used to invalidate a given token; any child tokens
+// will be orphaned.
+func (ts *TokenStore) Revoke(id string) error {
+ defer metrics.MeasureSince([]string{"token", "revoke"}, time.Now())
+ if id == "" {
+ return fmt.Errorf("cannot revoke blank token")
+ }
+
+ return ts.revokeSalted(ts.SaltID(id))
+}
+
+// revokeSalted is used to invalidate a given salted token;
+// any child tokens will be orphaned.
+func (ts *TokenStore) revokeSalted(saltedId string) (ret error) {
+ // Protect the entry lookup/writing with locks. The rub here is that we
+ // don't know the ID until we look it up once, so first we look it up, then
+ // do a locked lookup.
+ entry, err := ts.lookupSalted(saltedId, true)
+ if err != nil {
+ return err
+ }
+ if entry == nil {
+ return nil
+ }
+
+ lock := locksutil.LockForKey(ts.tokenLocks, entry.ID)
+ lock.Lock()
+
+ // Lookup the token first
+ entry, err = ts.lookupSalted(saltedId, true)
+ if err != nil {
+ lock.Unlock()
+ return err
+ }
+
+ if entry == nil {
+ lock.Unlock()
+ return nil
+ }
+
+ // On failure we write -3, so if we hit -2 here we're already running a
+ // revocation operation. This can happen due to e.g. recursion into this
+ // function via the expiration manager's RevokeByToken.
+ if entry.NumUses == tokenRevocationInProgress {
+ lock.Unlock()
+ return nil
+ }
+
+ // This acts as a WAL. lookupSalted will no longer return this entry,
+ // so the token cannot be used, but this way we can keep the entry
+ // around until after the rest of this function is attempted, and a
+ // tidy function can key off of this value to try again.
+ entry.NumUses = tokenRevocationInProgress
+ err = ts.storeCommon(entry, false)
+ lock.Unlock()
+ if err != nil {
+ return err
+ }
+
+ // If we are returning an error, mark the entry with -3 to indicate
+ // failed revocation. This way we don't try to clean up during active
+ // revocation (-2).
+ defer func() {
+ if ret != nil {
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Lookup the token again to make sure something else didn't
+ // revoke in the interim
+ entry, err := ts.lookupSalted(saltedId, true)
+ if err != nil {
+ return
+ }
+
+ // If it exists just taint to -3 rather than trying to figure
+ // out what it means if it's already -3 after the -2 above
+ if entry != nil {
+ entry.NumUses = tokenRevocationFailed
+ ts.storeCommon(entry, false)
+ }
+ }
+ }()
+
+ // Destroy the token's cubby. This should go first as it's a
+ // security-sensitive item.
+ err = ts.cubbyholeDestroyer(ts, saltedId)
+ if err != nil {
+ return err
+ }
+
+ // Revoke all secrets under this token. This should also happen early,
+ // as it's a security-sensitive item.
+ if err := ts.expiration.RevokeByToken(entry); err != nil {
+ return err
+ }
+
+ // Clear the secondary index if any
+ if entry.Parent != "" {
+ path := parentPrefix + ts.SaltID(entry.Parent) + "/" + saltedId
+ if err = ts.view.Delete(path); err != nil {
+ return fmt.Errorf("failed to delete entry: %v", err)
+ }
+ }
+
+ // Clear the accessor index if any
+ if entry.Accessor != "" {
+ path := accessorPrefix + ts.SaltID(entry.Accessor)
+ if err = ts.view.Delete(path); err != nil {
+ return fmt.Errorf("failed to delete entry: %v", err)
+ }
+ }
+
+ // Now that the entry is not usable for any revocation tasks, nuke it
+ path := lookupPrefix + saltedId
+ if err = ts.view.Delete(path); err != nil {
+ return fmt.Errorf("failed to delete entry: %v", err)
+ }
+
+ return nil
+}
+
+// RevokeTree is used to invalidate a given token and all
+// child tokens.
+func (ts *TokenStore) RevokeTree(id string) error {
+ defer metrics.MeasureSince([]string{"token", "revoke-tree"}, time.Now())
+ // Verify the token is not blank
+ if id == "" {
+ return fmt.Errorf("cannot tree-revoke blank token")
+ }
+
+ // Get the salted ID
+ saltedId := ts.SaltID(id)
+
+ // Nuke the entire tree recursively
+ if err := ts.revokeTreeSalted(saltedId); err != nil {
+ return err
+ }
+ return nil
+}
+
+// revokeTreeSalted is used to invalidate a given token and all
+// child tokens using a saltedID.
+func (ts *TokenStore) revokeTreeSalted(saltedId string) error {
+ // Scan for child tokens
+ path := parentPrefix + saltedId + "/"
+ children, err := ts.view.List(path)
+ if err != nil {
+ return fmt.Errorf("failed to scan for children: %v", err)
+ }
+
+ // Recursively nuke the children. The subtle nuance here is that
+ // we don't have the actual ID of the child, but we have the salted
+ // value. Turns out, this is good enough!
+ for _, child := range children {
+ if err := ts.revokeTreeSalted(child); err != nil {
+ return err
+ }
+ }
+
+ // Revoke this entry
+ if err := ts.revokeSalted(saltedId); err != nil {
+ return fmt.Errorf("failed to revoke entry: %v", err)
+ }
+ return nil
+}
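+
+// Read-only sketch of the same parent-index walk revokeTreeSalted performs,
+// written as a descendant counter for illustration; it assumes the key
+// layout written by storeCommon.
+func exampleCountDescendants(ts *TokenStore, saltedId string) (int, error) {
+ children, err := ts.view.List(parentPrefix + saltedId + "/")
+ if err != nil {
+ return 0, err
+ }
+ total := len(children)
+ for _, child := range children {
+ n, err := exampleCountDescendants(ts, child)
+ if err != nil {
+ return 0, err
+ }
+ total += n
+ }
+ return total, nil
+}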
+
+// handleCreateAgainstRole handles the auth/token/create path for a role
+func (ts *TokenStore) handleCreateAgainstRole(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("role_name").(string)
+ roleEntry, err := ts.tokenStoreRole(name)
+ if err != nil {
+ return nil, err
+ }
+ if roleEntry == nil {
+ return logical.ErrorResponse(fmt.Sprintf("unknown role %s", name)), nil
+ }
+
+ return ts.handleCreateCommon(req, d, false, roleEntry)
+}
+
+func (ts *TokenStore) lookupByAccessor(accessor string) (accessorEntry, error) {
+ return ts.lookupBySaltedAccessor(ts.SaltID(accessor))
+}
+
+func (ts *TokenStore) lookupBySaltedAccessor(saltedAccessor string) (accessorEntry, error) {
+ entry, err := ts.view.Get(accessorPrefix + saltedAccessor)
+ var aEntry accessorEntry
+
+ if err != nil {
+ return aEntry, fmt.Errorf("failed to read index using accessor: %s", err)
+ }
+ if entry == nil {
+ return aEntry, &logical.StatusBadRequest{Err: "invalid accessor"}
+ }
+
+ err = jsonutil.DecodeJSON(entry.Value, &aEntry)
+ // If we hit an error, assume it's a pre-struct straight token ID
+ if err != nil {
+ aEntry.TokenID = string(entry.Value)
+ te, err := ts.lookupSalted(ts.SaltID(aEntry.TokenID), false)
+ if err != nil {
+ return accessorEntry{}, fmt.Errorf("failed to look up token using accessor index: %s", err)
+ }
+ // It's hard to reason about what to do here -- it may be that the
+ // token was revoked async, or that it's an old accessor index entry
+ // that was somehow not cleared up, and so on. A nonexistent token entry
+ // on lookup is nil, not an error, so we keep that behavior here to be
+ // safe...the token ID is simply not filled in.
+ if te != nil {
+ aEntry.AccessorID = te.Accessor
+ }
+ }
+
+ return aEntry, nil
+}
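+
+// Sketch of the two accessor index formats handled above (illustrative):
+// pre-struct entries stored the raw token ID, while current entries store a
+// JSON-encoded accessorEntry.
+func exampleAccessorIndexFormats(tokenID, accessorID string) (oldFormat, newFormat []byte, err error) {
+ oldFormat = []byte(tokenID)
+ newFormat, err = jsonutil.EncodeJSON(&accessorEntry{
+ TokenID: tokenID,
+ AccessorID: accessorID,
+ })
+ return oldFormat, newFormat, err
+}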
+
+// handleTidy handles the cleaning up of leaked accessor storage entries and
+// cleaning up of leases that are associated to tokens that are expired.
+func (ts *TokenStore) handleTidy(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // List out all the accessors
+ saltedAccessorList, err := ts.view.List(accessorPrefix)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch accessor entries: %v", err)
+ }
+
+ var tidyErrors *multierror.Error
+
+ // First, clean up secondary index entries that are no longer valid
+ parentList, err := ts.view.List(parentPrefix)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch secondary index entries: %v", err)
+ }
+
+ // Scan through the secondary index entries; if there is an entry
+ // with the token's salt ID at the end, remove it
+ for _, parent := range parentList {
+ children, err := ts.view.List(parentPrefix + parent + "/")
+ if err != nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read child index entry: %v", err))
+ continue
+ }
+
+ for _, child := range children {
+ // Look up tainted entries so we can be sure that if this isn't
+ // found, it doesn't exist
+ te, _ := ts.lookupSalted(child, true)
+ if te == nil {
+ err = ts.view.Delete(parentPrefix + parent + "/" + child)
+ if err != nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete secondary index entry: %v", err))
+ }
+ }
+ }
+ }
+
+ // For each of the accessors, see if the token ID associated with it is
+ // a valid one. If not, delete the leases associated with that token
+ // and delete the accessor as well.
+ for _, saltedAccessor := range saltedAccessorList {
+ accessorEntry, err := ts.lookupBySaltedAccessor(saltedAccessor)
+ if err != nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read the accessor entry: %v", err))
+ continue
+ }
+
+ // A valid accessor storage entry should always have a token ID
+ // in it. If not, it is an invalid accessor entry and needs to
+ // be deleted.
+ if accessorEntry.TokenID == "" {
+ // If deletion of the accessor fails, move on to the next
+ // item since this is just a best-effort operation
+ err = ts.view.Delete(accessorPrefix + saltedAccessor)
+ if err != nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete the accessor entry: %v", err))
+ }
+ // Either way there is no token left to process, so move on to
+ // the next accessor rather than attempting a lookup with an
+ // empty token ID
+ continue
+ }
+
+ saltedId := ts.SaltID(accessorEntry.TokenID)
+
+ // Look up tainted variants so we only find entries that truly don't
+ // exist
+ te, err := ts.lookupSalted(saltedId, true)
+
+ // If the token entry is not found, assume that the token is no
+ // longer valid and conclude that the accessor, leases, and
+ // secondary index entries for this token should not exist either.
+ if te == nil {
+ // RevokeByToken expects a '*TokenEntry'. For the
+ // purposes of tidying, it is sufficient if the token
+ // entry only has ID set.
+ tokenEntry := &TokenEntry{
+ ID: accessorEntry.TokenID,
+ }
+
+ // Attempt to revoke the token. This will also revoke
+ // the leases associated with the token.
+ err := ts.expiration.RevokeByToken(tokenEntry)
+ if err != nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to revoke leases of expired token: %v", err))
+ continue
+ }
+
+ // If deletion of accessor fails, move on to the next item since
+ // this is just a best-effort operation. We do this last so that on
+ // next run if something above failed we still have the accessor
+ // entry to try again.
+ err = ts.view.Delete(accessorPrefix + saltedAccessor)
+ if err != nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete accessor entry: %v", err))
+ continue
+ }
+ }
+ }
+
+ // Later request-handling code seems to check whether the type is a
+ // multierror, so if we haven't added any errors we need to return a
+ // plain nil error
+ if tidyErrors == nil {
+ return nil, nil
+ }
+
+ return nil, tidyErrors
+}
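+
+// Sketch of the staleness test the tidy loop applies to both the parent
+// index and the accessor index: an index entry is dangling when even a
+// tainted lookup finds no token behind it.
+func exampleIsDangling(ts *TokenStore, saltedId string) (bool, error) {
+ te, err := ts.lookupSalted(saltedId, true)
+ if err != nil {
+ return false, err
+ }
+ return te == nil, nil
+}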
+
+// handleUpdateLookupAccessor handles the auth/token/lookup-accessor path for returning
+// the properties of the token associated with the accessor
+func (ts *TokenStore) handleUpdateLookupAccessor(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var urlaccessor bool
+ accessor := data.Get("accessor").(string)
+ if accessor == "" {
+ accessor = data.Get("urlaccessor").(string)
+ if accessor == "" {
+ return nil, &logical.StatusBadRequest{Err: "missing accessor"}
+ }
+ urlaccessor = true
+ }
+
+ aEntry, err := ts.lookupByAccessor(accessor)
+ if err != nil {
+ return nil, err
+ }
+
+ // Prepare the field data required for a lookup call
+ d := &framework.FieldData{
+ Raw: map[string]interface{}{
+ "token": aEntry.TokenID,
+ },
+ Schema: map[string]*framework.FieldSchema{
+ "token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Token to lookup",
+ },
+ },
+ }
+ resp, err := ts.handleLookup(req, d)
+ if err != nil {
+ return nil, err
+ }
+ if resp == nil {
+ return nil, fmt.Errorf("failed to lookup the token")
+ }
+ if resp.IsError() {
+ return resp, nil
+
+ }
+
+ // Remove the token ID from the response
+ if resp.Data != nil {
+ resp.Data["id"] = ""
+ }
+
+ if urlaccessor {
+ resp.AddWarning(`Using an accessor in the path is unsafe as the accessor can be logged in many places. Please use POST or PUT with the accessor passed in via the "accessor" parameter.`)
+ }
+
+ return resp, nil
+}
+
+// handleUpdateRevokeAccessor handles the auth/token/revoke-accessor path for revoking
+// the token associated with the accessor
+func (ts *TokenStore) handleUpdateRevokeAccessor(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var urlaccessor bool
+ accessor := data.Get("accessor").(string)
+ if accessor == "" {
+ accessor = data.Get("urlaccessor").(string)
+ if accessor == "" {
+ return nil, &logical.StatusBadRequest{Err: "missing accessor"}
+ }
+ urlaccessor = true
+ }
+
+ aEntry, err := ts.lookupByAccessor(accessor)
+ if err != nil {
+ return nil, err
+ }
+
+ // Revoke the token and its children
+ if err := ts.RevokeTree(aEntry.TokenID); err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+
+ if urlaccessor {
+ resp := &logical.Response{}
+ resp.AddWarning(`Using an accessor in the path is unsafe as the accessor can be logged in many places. Please use POST or PUT with the accessor passed in via the "accessor" parameter.`)
+ return resp, nil
+ }
+
+ return nil, nil
+}
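+
+// Sketch of the parameter-precedence pattern shared by the accessor and token
+// handlers in this file: prefer the request body, fall back to the URL
+// parameter, and report the fallback so a warning can be attached to the
+// response.
+func exampleAccessorParam(data *framework.FieldData) (accessor string, fromURL bool, err error) {
+ accessor = data.Get("accessor").(string)
+ if accessor == "" {
+ accessor = data.Get("urlaccessor").(string)
+ if accessor == "" {
+ return "", false, fmt.Errorf("missing accessor")
+ }
+ fromURL = true
+ }
+ return accessor, fromURL, nil
+}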
+
+// handleCreateOrphan handles the auth/token/create-orphan path for the
+// creation of new orphan tokens
+func (ts *TokenStore) handleCreateOrphan(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ return ts.handleCreateCommon(req, d, true, nil)
+}
+
+// handleCreate handles the auth/token/create path for the creation of new non-orphan
+// tokens
+func (ts *TokenStore) handleCreate(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ return ts.handleCreateCommon(req, d, false, nil)
+}
+
+// handleCreateCommon handles the auth/token/create path for creation of new tokens
+func (ts *TokenStore) handleCreateCommon(
+ req *logical.Request, d *framework.FieldData, orphan bool, role *tsRoleEntry) (*logical.Response, error) {
+ // Read the parent policy
+ parent, err := ts.Lookup(req.ClientToken)
+ if err != nil || parent == nil {
+ return logical.ErrorResponse("parent token lookup failed"), logical.ErrInvalidRequest
+ }
+
+ // A token with a restricted number of uses cannot create a new token;
+ // otherwise it could escape the restriction count.
+ if parent.NumUses > 0 {
+ return logical.ErrorResponse("restricted use token cannot generate child tokens"),
+ logical.ErrInvalidRequest
+ }
+
+ // Check if the client token has sudo/root privileges for the requested path
+ isSudo := ts.System().SudoPrivilege(req.MountPoint+req.Path, req.ClientToken)
+
+ // Read and parse the fields
+ var data struct {
+ ID string
+ Policies []string
+ Metadata map[string]string `mapstructure:"meta"`
+ NoParent bool `mapstructure:"no_parent"`
+ NoDefaultPolicy bool `mapstructure:"no_default_policy"`
+ Lease string
+ TTL string
+ Renewable *bool
+ ExplicitMaxTTL string `mapstructure:"explicit_max_ttl"`
+ DisplayName string `mapstructure:"display_name"`
+ NumUses int `mapstructure:"num_uses"`
+ Period string
+ }
+ if err := mapstructure.WeakDecode(req.Data, &data); err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "Error decoding request: %s", err)), logical.ErrInvalidRequest
+ }
+
+ // Verify the number of uses is not negative
+ if data.NumUses < 0 {
+ return logical.ErrorResponse("number of uses cannot be negative"),
+ logical.ErrInvalidRequest
+ }
+
+ // Setup the token entry
+ te := TokenEntry{
+ Parent: req.ClientToken,
+
+ // The mount point is always the same since we have only one token
+ // store; using req.MountPoint causes trouble in tests since they don't
+ // have an official mount
+ Path: fmt.Sprintf("auth/token/%s", req.Path),
+
+ Meta: data.Metadata,
+ DisplayName: "token",
+ NumUses: data.NumUses,
+ CreationTime: time.Now().Unix(),
+ }
+
+ renewable := true
+ if data.Renewable != nil {
+ renewable = *data.Renewable
+ }
+
+ // If the role is not nil, we add the role name as part of the token's
+ // path. This makes it much easier to later revoke tokens that were issued
+ // by a role (using revoke-prefix). Users can further specify a PathSuffix
+ // in the role; that way they can use something like "v1", "v2" to indicate
+ // role revisions, and revoke only tokens issued with a previous revision.
+ if role != nil {
+ te.Role = role.Name
+
+ // If renewable hasn't been disabled in the call and the role has
+ // renewability disabled, set renewable false
+ if renewable && !role.Renewable {
+ renewable = false
+ }
+
+ if role.PathSuffix != "" {
+ te.Path = fmt.Sprintf("%s/%s", te.Path, role.PathSuffix)
+ }
+ }
+
+ // Attach the given display name if any
+ if data.DisplayName != "" {
+ full := "token-" + data.DisplayName
+ full = displayNameSanitize.ReplaceAllString(full, "-")
+ full = strings.TrimSuffix(full, "-")
+ te.DisplayName = full
+ }
+
+ // Allow specifying the ID of the token if the client has root or sudo privileges
+ if data.ID != "" {
+ if !isSudo {
+ return logical.ErrorResponse("root or sudo privileges required to specify token id"),
+ logical.ErrInvalidRequest
+ }
+ te.ID = data.ID
+ }
+
+ resp := &logical.Response{}
+
+ var addDefault bool
+
+ // N.B.: The logic here uses various calculations as to whether default
+ // should be added. In the end we decided that if NoDefaultPolicy is set it
+ // should be stripped out regardless, *but*, the logic of when it should
+ // and shouldn't be added is kept because we want to do subset comparisons
+ // based on adding default when it's correct to do so.
+ switch {
+ case role != nil && (len(role.AllowedPolicies) > 0 || len(role.DisallowedPolicies) > 0):
+ // Holds the final set of policies as they get munged
+ var finalPolicies []string
+
+ // We don't make use of the global one because roles with allowed or
+ // disallowed set do their own policy rules
+ var localAddDefault bool
+
+ // If the request doesn't say not to add "default" and if "default"
+ // isn't in the disallowed list, add it. This is in line with the idea
+ // that roles, when allowed/disallowed are set, allow a subset of
+ // policies to be set disjoint from the parent token's policies.
+ if !data.NoDefaultPolicy && !strutil.StrListContains(role.DisallowedPolicies, "default") {
+ localAddDefault = true
+ }
+
+ // Start with passed-in policies as a baseline, if they exist
+ if len(data.Policies) > 0 {
+ finalPolicies = policyutil.SanitizePolicies(data.Policies, localAddDefault)
+ }
+
+ var sanitizedRolePolicies []string
+
+ // First check allowed policies; if policies are specified they will be
+ // checked, otherwise if an allowed set exists that will be the set
+ // that is used
+ if len(role.AllowedPolicies) > 0 {
+ // Note that if "default" is already in allowed, and also in
+ // disallowed, this will still result in an error later since this
+ // doesn't strip out default
+ sanitizedRolePolicies = policyutil.SanitizePolicies(role.AllowedPolicies, localAddDefault)
+
+ if len(finalPolicies) == 0 {
+ finalPolicies = sanitizedRolePolicies
+ } else {
+ if !strutil.StrListSubset(sanitizedRolePolicies, finalPolicies) {
+ return logical.ErrorResponse(fmt.Sprintf("token policies (%v) must be subset of the role's allowed policies (%v)", finalPolicies, sanitizedRolePolicies)), logical.ErrInvalidRequest
+ }
+ }
+ } else {
+ // Assign parent policies if none have been requested. As this is a
+ // role, add default unless explicitly disabled.
+ if len(finalPolicies) == 0 {
+ finalPolicies = policyutil.SanitizePolicies(parent.Policies, localAddDefault)
+ }
+ }
+
+ if len(role.DisallowedPolicies) > 0 {
+ // We don't add the default here because we only want to disallow it if it's explicitly set
+ sanitizedRolePolicies = strutil.RemoveDuplicates(role.DisallowedPolicies, true)
+
+ for _, finalPolicy := range finalPolicies {
+ if strutil.StrListContains(sanitizedRolePolicies, finalPolicy) {
+ return logical.ErrorResponse(fmt.Sprintf("token policy %q is disallowed by this role", finalPolicy)), logical.ErrInvalidRequest
+ }
+ }
+ }
+
+ data.Policies = finalPolicies
+
+ // No policies specified, inherit parent
+ case len(data.Policies) == 0:
+ // Only inherit "default" if the parent already has it, so don't touch addDefault here
+ data.Policies = policyutil.SanitizePolicies(parent.Policies, policyutil.DoNotAddDefaultPolicy)
+
+ // When a role is not in use or does not specify allowed/disallowed, only
+ // permit policies to be a subset unless the client has root or sudo
+ // privileges. Default is added in this case if the parent has it, unless
+ // the client specified for it not to be added.
+ case !isSudo:
+ // Sanitize passed-in and parent policies before comparison
+ sanitizedInputPolicies := policyutil.SanitizePolicies(data.Policies, policyutil.DoNotAddDefaultPolicy)
+ sanitizedParentPolicies := policyutil.SanitizePolicies(parent.Policies, policyutil.DoNotAddDefaultPolicy)
+
+ if !strutil.StrListSubset(sanitizedParentPolicies, sanitizedInputPolicies) {
+ return logical.ErrorResponse("child policies must be subset of parent"), logical.ErrInvalidRequest
+ }
+
+ // If the parent has default, and they haven't requested not to get it,
+ // add it. Note that if they have explicitly put "default" in
+ // data.Policies it will still be added because NoDefaultPolicy
+ // controls *automatic* adding.
+ if !data.NoDefaultPolicy && strutil.StrListContains(parent.Policies, "default") {
+ addDefault = true
+ }
+
+ // Add default by default in this case unless requested not to
+ case isSudo:
+ addDefault = !data.NoDefaultPolicy
+ }
+
+ te.Policies = policyutil.SanitizePolicies(data.Policies, addDefault)
+
+ // Yes, this is a little inefficient to do it like this, but meh
+ if data.NoDefaultPolicy {
+ te.Policies = strutil.StrListDelete(te.Policies, "default")
+ }
+
+ // Prevent internal policies from being assigned to tokens
+ for _, policy := range te.Policies {
+ if strutil.StrListContains(nonAssignablePolicies, policy) {
+ return logical.ErrorResponse(fmt.Sprintf("cannot assign policy %q", policy)), nil
+ }
+ }
+
+ // Prevent attempts to create a root token without an actual root token as parent.
+ // This is to thwart privilege escalation by tokens having 'sudo' privileges.
+ if strutil.StrListContains(data.Policies, "root") && !strutil.StrListContains(parent.Policies, "root") {
+ return logical.ErrorResponse("root tokens may not be created without parent token being root"), logical.ErrInvalidRequest
+ }
+
+ //
+ // NOTE: Do not modify policies below this line. We need the checks above
+ // to be the last checks as they must look at the final policy set.
+ //
+
+ switch {
+ case role != nil:
+ if role.Orphan {
+ te.Parent = ""
+ }
+
+ case data.NoParent:
+ // Only allow an orphan token if the client has sudo policy
+ if !isSudo {
+ return logical.ErrorResponse("root or sudo privileges required to create orphan token"),
+ logical.ErrInvalidRequest
+ }
+
+ te.Parent = ""
+
+ default:
+ // This comes from create-orphan, which can be properly ACLd
+ if orphan {
+ te.Parent = ""
+ }
+ }
+
+ if data.ExplicitMaxTTL != "" {
+ dur, err := parseutil.ParseDurationSecond(data.ExplicitMaxTTL)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+ if dur < 0 {
+ return logical.ErrorResponse("explicit_max_ttl must be positive"), logical.ErrInvalidRequest
+ }
+ te.ExplicitMaxTTL = dur
+ }
+
+ var periodToUse time.Duration
+ if data.Period != "" {
+ if !isSudo {
+ return logical.ErrorResponse("root or sudo privileges required to create periodic token"),
+ logical.ErrInvalidRequest
+ }
+ dur, err := parseutil.ParseDurationSecond(data.Period)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+ if dur < 0 {
+ return logical.ErrorResponse("period must be positive"), logical.ErrInvalidRequest
+ }
+ te.Period = dur
+ periodToUse = dur
+ }
+
+ // Parse the TTL/lease if any
+ if data.TTL != "" {
+ dur, err := parseutil.ParseDurationSecond(data.TTL)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+ if dur < 0 {
+ return logical.ErrorResponse("ttl must be positive"), logical.ErrInvalidRequest
+ }
+ te.TTL = dur
+ } else if data.Lease != "" {
+ // This block is compatibility
+ dur, err := time.ParseDuration(data.Lease)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+ if dur < 0 {
+ return logical.ErrorResponse("lease must be positive"), logical.ErrInvalidRequest
+ }
+ te.TTL = dur
+ }
+
+ // Set the lesser period/explicit max TTL if defined both in arguments and in role
+ if role != nil {
+ if role.ExplicitMaxTTL != 0 {
+ switch {
+ case te.ExplicitMaxTTL == 0:
+ te.ExplicitMaxTTL = role.ExplicitMaxTTL
+ default:
+ if role.ExplicitMaxTTL < te.ExplicitMaxTTL {
+ te.ExplicitMaxTTL = role.ExplicitMaxTTL
+ }
+ resp.AddWarning(fmt.Sprintf("Explicit max TTL specified both during creation call and in role; using the lesser value of %d seconds", int64(te.ExplicitMaxTTL.Seconds())))
+ }
+ }
+ if role.Period != 0 {
+ switch {
+ case periodToUse == 0:
+ periodToUse = role.Period
+ default:
+ if role.Period < periodToUse {
+ periodToUse = role.Period
+ }
+ resp.AddWarning(fmt.Sprintf("Period specified both during creation call and in role; using the lesser value of %d seconds", int64(periodToUse.Seconds())))
+ }
+ }
+ }
+
+ sysView := ts.System()
+
+ if periodToUse > 0 {
+ te.TTL = periodToUse
+ } else {
+ // Set the default lease if not provided; root tokens are exempt
+ if te.TTL == 0 && !strutil.StrListContains(te.Policies, "root") {
+ te.TTL = sysView.DefaultLeaseTTL()
+ }
+
+ // Limit the lease duration
+ if te.TTL > sysView.MaxLeaseTTL() && sysView.MaxLeaseTTL() != 0 {
+ te.TTL = sysView.MaxLeaseTTL()
+ }
+ }
+
+ // Run some bounding checks if the explicit max TTL is set; we do not check
+ // period as it's defined to escape the max TTL
+ if te.ExplicitMaxTTL > 0 {
+ // Limit the lease duration, except for periodic tokens -- in that case the explicit max limits the period, which itself can escape normal max
+ if sysView.MaxLeaseTTL() != 0 && te.ExplicitMaxTTL > sysView.MaxLeaseTTL() && periodToUse == 0 {
+ resp.AddWarning(fmt.Sprintf(
+ "Explicit max TTL of %d seconds is greater than system/mount allowed value; value is being capped to %d seconds",
+ int64(te.ExplicitMaxTTL.Seconds()), int64(sysView.MaxLeaseTTL().Seconds())))
+ te.ExplicitMaxTTL = sysView.MaxLeaseTTL()
+ }
+
+ if te.TTL == 0 {
+ // This won't be the case if it's periodic -- it will be set above
+ te.TTL = te.ExplicitMaxTTL
+ } else {
+ // Limit even in the periodic case
+ if te.TTL > te.ExplicitMaxTTL {
+ resp.AddWarning(fmt.Sprintf(
+ "Requested TTL of %d seconds higher than explicit max TTL; value being capped to %d seconds",
+ int64(te.TTL.Seconds()), int64(te.ExplicitMaxTTL.Seconds())))
+ te.TTL = te.ExplicitMaxTTL
+ }
+ }
+ }
+
+ // Don't advertise non-expiring root tokens as renewable, as attempts to renew them are denied
+ if te.TTL == 0 {
+ if parent.TTL != 0 {
+ return logical.ErrorResponse("expiring root tokens cannot create non-expiring root tokens"), logical.ErrInvalidRequest
+ }
+ renewable = false
+ }
+
+ // Create the token
+ if err := ts.create(&te); err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+
+ // Generate the response
+ resp.Auth = &logical.Auth{
+ NumUses: te.NumUses,
+ DisplayName: te.DisplayName,
+ Policies: te.Policies,
+ Metadata: te.Meta,
+ LeaseOptions: logical.LeaseOptions{
+ TTL: te.TTL,
+ Renewable: renewable,
+ },
+ ClientToken: te.ID,
+ Accessor: te.Accessor,
+ }
+
+ if ts.policyLookupFunc != nil {
+ for _, p := range te.Policies {
+ policy, err := ts.policyLookupFunc(p)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("could not look up policy %s", p)), nil
+ }
+ if policy == nil {
+ resp.AddWarning(fmt.Sprintf("Policy %q does not exist", p))
+ }
+ }
+ }
+
+ return resp, nil
+}
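+
+// Sketch of the subset rule handleCreateCommon applies in the non-sudo,
+// non-role case above, using the same policyutil and strutil helpers.
+func exampleChildPoliciesAllowed(parentPolicies, requested []string) bool {
+ p := policyutil.SanitizePolicies(parentPolicies, policyutil.DoNotAddDefaultPolicy)
+ r := policyutil.SanitizePolicies(requested, policyutil.DoNotAddDefaultPolicy)
+ return strutil.StrListSubset(p, r)
+}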
+
+// handleRevokeSelf handles the auth/token/revoke-self path for revocation of tokens
+// in a way that revokes all child tokens. Normally, using sys/revoke/leaseID will revoke
+// the token and all children anyway, but that is only available when there is a lease.
+func (ts *TokenStore) handleRevokeSelf(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Revoke the token and its children
+ if err := ts.RevokeTree(req.ClientToken); err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+ return nil, nil
+}
+
+// handleRevokeTree handles the auth/token/revoke/id path for revocation of tokens
+// in a way that revokes all child tokens. Normally, using sys/revoke/leaseID will revoke
+// the token and all children anyway, but that is only available when there is a lease.
+func (ts *TokenStore) handleRevokeTree(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var urltoken bool
+ id := data.Get("token").(string)
+ if id == "" {
+ id = data.Get("urltoken").(string)
+ if id == "" {
+ return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
+ }
+ urltoken = true
+ }
+
+ // Revoke the token and its children
+ if err := ts.RevokeTree(id); err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+
+ if urltoken {
+ resp := &logical.Response{}
+ resp.AddWarning(`Using a token in the path is unsafe as the token can be logged in many places. Please use POST or PUT with the token passed in via the "token" parameter.`)
+ return resp, nil
+ }
+
+ return nil, nil
+}
+
+// handleRevokeOrphan handles the auth/token/revoke-orphan/id path for revocation of tokens
+// in a way that leaves child tokens orphaned. Normally, using sys/revoke/leaseID will revoke
+// the token and all children.
+func (ts *TokenStore) handleRevokeOrphan(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var urltoken bool
+ // Parse the id
+ id := data.Get("token").(string)
+ if id == "" {
+ id = data.Get("urltoken").(string)
+ if id == "" {
+ return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
+ }
+ urltoken = true
+ }
+
+ parent, err := ts.Lookup(req.ClientToken)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("parent token lookup failed: %s", err.Error())), logical.ErrInvalidRequest
+ }
+ if parent == nil {
+ return logical.ErrorResponse("parent token lookup failed"), logical.ErrInvalidRequest
+ }
+
+ // Check if the client token has sudo/root privileges for the requested path
+ isSudo := ts.System().SudoPrivilege(req.MountPoint+req.Path, req.ClientToken)
+
+ if !isSudo {
+ return logical.ErrorResponse("root or sudo privileges required to revoke and orphan"),
+ logical.ErrInvalidRequest
+ }
+
+ // Revoke and orphan
+ if err := ts.Revoke(id); err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+
+ if urltoken {
+ resp := &logical.Response{}
+ resp.AddWarning(`Using a token in the path is unsafe as the token can be logged in many places. Please use POST or PUT with the token passed in via the "token" parameter.`)
+ return resp, nil
+ }
+
+ return nil, nil
+}
+
+func (ts *TokenStore) handleLookupSelf(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ data.Raw["token"] = req.ClientToken
+ return ts.handleLookup(req, data)
+}
+
+// handleLookup handles the auth/token/lookup/id path for querying information about
+// a particular token. This can be used to see which policies are applicable.
+func (ts *TokenStore) handleLookup(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var urltoken bool
+ id := data.Get("token").(string)
+ if id == "" {
+ id = data.Get("urltoken").(string)
+ if id != "" {
+ urltoken = true
+ }
+ }
+ if id == "" {
+ id = req.ClientToken
+ }
+ if id == "" {
+ return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
+ }
+
+ lock := locksutil.LockForKey(ts.tokenLocks, id)
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Lookup the token
+ saltedId := ts.SaltID(id)
+ out, err := ts.lookupSalted(saltedId, true)
+
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+
+ if out == nil {
+ return logical.ErrorResponse("bad token"), logical.ErrPermissionDenied
+ }
+
+ // Generate a response. We purposely omit the parent reference;
+ // otherwise you could escalate your privileges.
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "id": out.ID,
+ "accessor": out.Accessor,
+ "policies": out.Policies,
+ "path": out.Path,
+ "meta": out.Meta,
+ "display_name": out.DisplayName,
+ "num_uses": out.NumUses,
+ "orphan": false,
+ "creation_time": int64(out.CreationTime),
+ "creation_ttl": int64(out.TTL.Seconds()),
+ "expire_time": nil,
+ "ttl": int64(0),
+ "explicit_max_ttl": int64(out.ExplicitMaxTTL.Seconds()),
+ },
+ }
+
+ if out.Parent == "" {
+ resp.Data["orphan"] = true
+ }
+
+ if out.Role != "" {
+ resp.Data["role"] = out.Role
+ }
+ if out.Period != 0 {
+ resp.Data["period"] = int64(out.Period.Seconds())
+ }
+
+ // Fetch the last renewal time
+ leaseTimes, err := ts.expiration.FetchLeaseTimesByToken(out.Path, out.ID)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+ if leaseTimes != nil {
+ if !leaseTimes.LastRenewalTime.IsZero() {
+ resp.Data["last_renewal_time"] = leaseTimes.LastRenewalTime.Unix()
+ resp.Data["last_renewal"] = leaseTimes.LastRenewalTime
+ }
+ if !leaseTimes.ExpireTime.IsZero() {
+ resp.Data["expire_time"] = leaseTimes.ExpireTime
+ resp.Data["ttl"] = leaseTimes.ttl()
+ }
+ renewable, _ := leaseTimes.renewable()
+ resp.Data["renewable"] = renewable
+ resp.Data["issue_time"] = leaseTimes.IssueTime
+ }
+
+ if urltoken {
+ resp.AddWarning(`Using a token in the path is unsafe as the token can be logged in many places. Please use POST or PUT with the token passed in via the "token" parameter.`)
+ }
+
+ return resp, nil
+}
+
+func (ts *TokenStore) handleRenewSelf(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ data.Raw["token"] = req.ClientToken
+ return ts.handleRenew(req, data)
+}
+
+// handleRenew handles the auth/token/renew/id path for renewal of tokens.
+// This is used to prevent token expiration and revocation.
+func (ts *TokenStore) handleRenew(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var urltoken bool
+ id := data.Get("token").(string)
+ if id == "" {
+ id = data.Get("urltoken").(string)
+ if id == "" {
+ return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
+ }
+ urltoken = true
+ }
+ incrementRaw := data.Get("increment").(int)
+
+ // Convert the increment
+ increment := time.Duration(incrementRaw) * time.Second
+
+ // Lookup the token
+ te, err := ts.Lookup(id)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+
+ // Verify the token exists
+ if te == nil {
+ return logical.ErrorResponse("token not found"), logical.ErrInvalidRequest
+ }
+
+ // Renew the token and its children
+ resp, err := ts.expiration.RenewToken(req, te.Path, te.ID, increment)
+
+ // Guard against a nil response returned alongside an error
+ if urltoken && resp != nil {
+ resp.AddWarning(`Using a token in the path is unsafe as the token can be logged in many places. Please use POST or PUT with the token passed in via the "token" parameter.`)
+ }
+
+ return resp, err
+}
+
+func (ts *TokenStore) destroyCubbyhole(saltedID string) error {
+ if ts.cubbyholeBackend == nil {
+ // Should only ever happen in testing
+ return nil
+ }
+ return ts.cubbyholeBackend.revoke(salt.SaltID(ts.cubbyholeBackend.saltUUID, saltedID, salt.SHA1Hash))
+}
+
+func (ts *TokenStore) authRenew(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ if req.Auth == nil {
+ return nil, fmt.Errorf("request auth is nil")
+ }
+
+ te, err := ts.Lookup(req.Auth.ClientToken)
+ if err != nil {
+ return nil, fmt.Errorf("error looking up token: %s", err)
+ }
+ if te == nil {
+ return nil, fmt.Errorf("no token entry found during lookup")
+ }
+
+ f := framework.LeaseExtend(req.Auth.Increment, te.ExplicitMaxTTL, ts.System())
+
+ // If (te/role).Period is not zero, this is a periodic token. The TTL for a
+ // periodic token is always the same (the period value). It is not subject
+ // to normal maximum TTL checks that would come from calling LeaseExtend,
+ // so we fast path it.
+ //
+ // The one wrinkle here is if the token has an explicit max TTL. If both
+ // are set, we treat it as a regular token and use the periodic value as
+ // the increment.
+
+ // No role? Use normal LeaseExtend semantics, taking into account
+ // TokenEntry properties
+ if te.Role == "" {
+ // If both the period and an explicit max TTL are set, the max TTL caps the period
+ if te.Period != 0 {
+ if te.ExplicitMaxTTL == 0 {
+ req.Auth.TTL = te.Period
+ return &logical.Response{Auth: req.Auth}, nil
+ } else {
+ maxTime := time.Unix(te.CreationTime, 0).Add(te.ExplicitMaxTTL)
+ if time.Now().Add(te.Period).After(maxTime) {
+ req.Auth.TTL = maxTime.Sub(time.Now())
+ } else {
+ req.Auth.TTL = te.Period
+ }
+ return &logical.Response{Auth: req.Auth}, nil
+ }
+ }
+ return f(req, d)
+ }
+
+ role, err := ts.tokenStoreRole(te.Role)
+ if err != nil {
+ return nil, fmt.Errorf("error looking up role %s: %s", te.Role, err)
+ }
+
+ if role == nil {
+ return nil, fmt.Errorf("original token role (%s) could not be found, not renewing", te.Role)
+ }
+
+ // Same deal here, but using the role period
+ if role.Period != 0 {
+ periodToUse := role.Period
+ if te.Period > 0 && te.Period < role.Period {
+ periodToUse = te.Period
+ }
+ if te.ExplicitMaxTTL == 0 {
+ req.Auth.TTL = periodToUse
+ return &logical.Response{Auth: req.Auth}, nil
+ } else {
+ maxTime := time.Unix(te.CreationTime, 0).Add(te.ExplicitMaxTTL)
+ if time.Now().Add(periodToUse).After(maxTime) {
+ req.Auth.TTL = maxTime.Sub(time.Now())
+ } else {
+ req.Auth.TTL = periodToUse
+ }
+ return &logical.Response{Auth: req.Auth}, nil
+ }
+ }
+
+ return f(req, d)
+}
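+
+// Sketch of the period-versus-explicit-max-TTL capping authRenew performs
+// twice above: a periodic token renews to its period unless that would push
+// it past the creation-time-anchored explicit max TTL.
+func exampleCappedPeriodTTL(creationTime int64, period, explicitMaxTTL time.Duration) time.Duration {
+ if explicitMaxTTL == 0 {
+ return period
+ }
+ maxTime := time.Unix(creationTime, 0).Add(explicitMaxTTL)
+ if time.Now().Add(period).After(maxTime) {
+ return maxTime.Sub(time.Now())
+ }
+ return period
+}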
+
+func (ts *TokenStore) tokenStoreRole(name string) (*tsRoleEntry, error) {
+ entry, err := ts.view.Get(fmt.Sprintf("%s%s", rolesPrefix, name))
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result tsRoleEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (ts *TokenStore) tokenStoreRoleList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entries, err := ts.view.List(rolesPrefix)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := make([]string, len(entries))
+ for i, entry := range entries {
+ ret[i] = strings.TrimPrefix(entry, rolesPrefix)
+ }
+
+ return logical.ListResponse(ret), nil
+}
+
+func (ts *TokenStore) tokenStoreRoleDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ err := ts.view.Delete(fmt.Sprintf("%s%s", rolesPrefix, data.Get("role_name").(string)))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (ts *TokenStore) tokenStoreRoleRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ role, err := ts.tokenStoreRole(data.Get("role_name").(string))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "period": int64(role.Period.Seconds()),
+ "explicit_max_ttl": int64(role.ExplicitMaxTTL.Seconds()),
+ "disallowed_policies": role.DisallowedPolicies,
+ "allowed_policies": role.AllowedPolicies,
+ "name": role.Name,
+ "orphan": role.Orphan,
+ "path_suffix": role.PathSuffix,
+ "renewable": role.Renewable,
+ },
+ }
+
+ return resp, nil
+}
+
+func (ts *TokenStore) tokenStoreRoleExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
+ name := data.Get("role_name").(string)
+ if name == "" {
+ return false, fmt.Errorf("role name cannot be empty")
+ }
+ role, err := ts.tokenStoreRole(name)
+ if err != nil {
+ return false, err
+ }
+
+ return role != nil, nil
+}
+
+func (ts *TokenStore) tokenStoreRoleCreateUpdate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("role_name").(string)
+ if name == "" {
+ return logical.ErrorResponse("role name cannot be empty"), nil
+ }
+ entry, err := ts.tokenStoreRole(name)
+ if err != nil {
+ return nil, err
+ }
+
+ // Due to the existence check, entry will only be nil if it's a create
+ // operation, so just create a new one
+ if entry == nil {
+ entry = &tsRoleEntry{
+ Name: name,
+ }
+ }
+
+ // In this series of blocks, if we do not find a user-provided value and
+ // it's a creation operation, we call data.Get to get the appropriate
+ // default
+
+ orphanInt, ok := data.GetOk("orphan")
+ if ok {
+ entry.Orphan = orphanInt.(bool)
+ } else if req.Operation == logical.CreateOperation {
+ entry.Orphan = data.Get("orphan").(bool)
+ }
+
+ periodInt, ok := data.GetOk("period")
+ if ok {
+ entry.Period = time.Second * time.Duration(periodInt.(int))
+ } else if req.Operation == logical.CreateOperation {
+ entry.Period = time.Second * time.Duration(data.Get("period").(int))
+ }
+
+ renewableInt, ok := data.GetOk("renewable")
+ if ok {
+ entry.Renewable = renewableInt.(bool)
+ } else if req.Operation == logical.CreateOperation {
+ entry.Renewable = data.Get("renewable").(bool)
+ }
+
+ var resp *logical.Response
+
+ explicitMaxTTLInt, ok := data.GetOk("explicit_max_ttl")
+ if ok {
+ entry.ExplicitMaxTTL = time.Second * time.Duration(explicitMaxTTLInt.(int))
+ } else if req.Operation == logical.CreateOperation {
+ entry.ExplicitMaxTTL = time.Second * time.Duration(data.Get("explicit_max_ttl").(int))
+ }
+ if entry.ExplicitMaxTTL != 0 {
+ sysView := ts.System()
+
+ if sysView.MaxLeaseTTL() != time.Duration(0) && entry.ExplicitMaxTTL > sysView.MaxLeaseTTL() {
+ if resp == nil {
+ resp = &logical.Response{}
+ }
+ resp.AddWarning(fmt.Sprintf(
+ "Given explicit max TTL of %d is greater than system/mount allowed value of %d seconds; until this is fixed attempting to create tokens against this role will result in an error",
+ int64(entry.ExplicitMaxTTL.Seconds()), int64(sysView.MaxLeaseTTL().Seconds())))
+ }
+ }
+
+ pathSuffixInt, ok := data.GetOk("path_suffix")
+ if ok {
+ pathSuffix := pathSuffixInt.(string)
+ if pathSuffix != "" {
+ matched := pathSuffixSanitize.MatchString(pathSuffix)
+ if !matched {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "given role path suffix contains invalid characters; must match %s",
+ pathSuffixSanitize.String())), nil
+ }
+ entry.PathSuffix = pathSuffix
+ }
+ } else if req.Operation == logical.CreateOperation {
+ entry.PathSuffix = data.Get("path_suffix").(string)
+ }
+
+ allowedPoliciesStr, ok := data.GetOk("allowed_policies")
+ if ok {
+ entry.AllowedPolicies = policyutil.SanitizePolicies(strings.Split(allowedPoliciesStr.(string), ","), policyutil.DoNotAddDefaultPolicy)
+ } else if req.Operation == logical.CreateOperation {
+ entry.AllowedPolicies = policyutil.SanitizePolicies(strings.Split(data.Get("allowed_policies").(string), ","), policyutil.DoNotAddDefaultPolicy)
+ }
+
+ disallowedPoliciesStr, ok := data.GetOk("disallowed_policies")
+ if ok {
+ entry.DisallowedPolicies = strutil.ParseDedupLowercaseAndSortStrings(disallowedPoliciesStr.(string), ",")
+ } else if req.Operation == logical.CreateOperation {
+ entry.DisallowedPolicies = strutil.ParseDedupLowercaseAndSortStrings(data.Get("disallowed_policies").(string), ",")
+ }
+
+ // Store it
+ jsonEntry, err := logical.StorageEntryJSON(fmt.Sprintf("%s%s", rolesPrefix, name), entry)
+ if err != nil {
+ return nil, err
+ }
+ if err := ts.view.Put(jsonEntry); err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
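+
+// As an illustration (values mirror TestTokenStore_RoleCRUD in
+// token_store_test.go below), a role created with
+//
+//	{"orphan": true, "period": "72h", "allowed_policies": "test1,test2"}
+//
+// reads back with a "period" of 259200 seconds and issues orphan, periodic
+// tokens via auth/token/create/<role_name>.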
+
+const (
+ tokenTidyHelp = `
+This endpoint performs cleanup tasks that can be run if certain error
+conditions have occurred.
+`
+ tokenTidyDesc = `
+This endpoint performs cleanup tasks that can be run to clean up token and
+lease entries after certain error conditions. Usually running this is not
+necessary, and is only required if upgrade notes or support personnel suggest
+it.
+`
+ tokenBackendHelp = `The token credential backend is always enabled and built into Vault.
+Client tokens are used to identify a client and to allow Vault to associate policies and ACLs
+which are enforced on every request. This backend also allows for generating sub-tokens as well
+as revocation of tokens. The tokens are renewable if associated with a lease.`
+ tokenCreateHelp = `The token create path is used to create new tokens.`
+ tokenCreateOrphanHelp = `The token create path is used to create new orphan tokens.`
+ tokenCreateRoleHelp = `This token create path is used to create new tokens adhering to the given role.`
+ tokenListRolesHelp = `This endpoint lists configured roles.`
+ tokenLookupAccessorHelp = `This endpoint will look up the token associated with the given accessor and return its properties. The response will not contain the token ID.`
+ tokenLookupHelp = `This endpoint will look up a token and its properties.`
+ tokenPathRolesHelp = `This endpoint allows creating, reading, and deleting roles.`
+ tokenRevokeAccessorHelp = `This endpoint will delete the token associated with the accessor and all of its child tokens.`
+ tokenRevokeHelp = `This endpoint will delete the given token and all of its child tokens.`
+ tokenRevokeSelfHelp = `This endpoint will delete the token used to call it and all of its child tokens.`
+ tokenRevokeOrphanHelp = `This endpoint will delete the token and orphan its child tokens.`
+ tokenRenewHelp = `This endpoint will renew the given token and prevent expiration.`
+ tokenRenewSelfHelp = `This endpoint will renew the token used to call it and prevent expiration.`
+ tokenAllowedPoliciesHelp = `If set, tokens can be created with any subset of the policies in this
+list, rather than the normal semantics of tokens being a subset of the
+calling token's policies. The parameter is a comma-delimited string of
+policy names.`
+ tokenDisallowedPoliciesHelp = `If set, successful token creation via this role will require that
+no policies in the given list are requested. The parameter is a comma-delimited string of policy names.`
+ tokenOrphanHelp = `If true, tokens created via this role
+will be orphan tokens (they will have no parent).`
+ tokenPeriodHelp = `If set, tokens created via this role
+will have no max lifetime; instead, their
+renewal period will be fixed to this value.
+This takes an integer number of seconds,
+or a string duration (e.g. "24h").`
+ tokenPathSuffixHelp = `If set, tokens created via this role
+will contain the given suffix as a part of
+their path. This can be used to assist use
+of the 'revoke-prefix' endpoint later on.
+The given suffix must match the regular
+expression.`
+ tokenExplicitMaxTTLHelp = `If set, tokens created via this role
+carry an explicit maximum TTL. During renewal,
+the current maximum TTL values of the role
+and the mount are not checked for changes,
+and any updates to these values will have
+no effect on the token being renewed.`
+ tokenRenewableHelp = `Tokens created via this role will be
+renewable or not according to this value.
+Defaults to "true".`
+ tokenListAccessorsHelp = `List token accessors, which can then
+be used to iterate and discover their properties
+or revoke them. Because this can be used to
+cause a denial of service, this endpoint
+requires 'sudo' capability in addition to
+'list'.`
+)
diff --git a/vendor/github.com/hashicorp/vault/vault/token_store_test.go b/vendor/github.com/hashicorp/vault/vault/token_store_test.go
new file mode 100644
index 0000000..7a84fe7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/token_store_test.go
@@ -0,0 +1,3475 @@
+package vault
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/logical"
+)
+
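+// TokenEntryOld mirrors the pre-struct-tag layout of TokenEntry so the
+// upgrade test below can persist entries the way older versions did.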
+type TokenEntryOld struct {
+ ID string
+ Accessor string
+ Parent string
+ Policies []string
+ Path string
+ Meta map[string]string
+ DisplayName string
+ NumUses int
+ CreationTime int64
+ TTL time.Duration
+ ExplicitMaxTTL time.Duration
+ Role string
+ Period time.Duration
+}
+
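+// TestTokenStore_TokenEntryUpgrade ensures entries persisted without struct
+// tags (and entries with only the deprecated fields set) are read back
+// correctly, and that the lower of NumUses/NumUsesDeprecated wins.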
+func TestTokenStore_TokenEntryUpgrade(t *testing.T) {
+ var err error
+ _, ts, _, _ := TestCoreWithTokenStore(t)
+
+ // Use a struct that does not have struct tags to store the items and
+ // check if the lookup code handles them properly while reading back
+ entry := &TokenEntryOld{
+ DisplayName: "test-display-name",
+ Path: "test",
+ Policies: []string{"dev", "ops"},
+ CreationTime: time.Now().Unix(),
+ ExplicitMaxTTL: 100,
+ NumUses: 10,
+ }
+ entry.ID, err = uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ enc, err := json.Marshal(entry)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ saltedId := ts.SaltID(entry.ID)
+ path := lookupPrefix + saltedId
+ le := &logical.StorageEntry{
+ Key: path,
+ Value: enc,
+ }
+
+ if err := ts.view.Put(le); err != nil {
+ t.Fatal(err)
+ }
+
+ out, err := ts.Lookup(entry.ID)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if out.DisplayName != "test-display-name" {
+ t.Fatalf("bad: display_name: expected: test-display-name, actual: %s", out.DisplayName)
+ }
+ if out.CreationTime == 0 {
+ t.Fatal("bad: expected a non-zero creation time")
+ }
+ if out.ExplicitMaxTTL != 100 {
+ t.Fatalf("bad: explicit_max_ttl: expected: 100, actual: %d", out.ExplicitMaxTTL)
+ }
+ if out.NumUses != 10 {
+ t.Fatalf("bad: num_uses: expected: 10, actual: %d", out.NumUses)
+ }
+
+ // Test the default case to ensure there are no regressions
+ ent := &TokenEntry{
+ DisplayName: "test-display-name",
+ Path: "test",
+ Policies: []string{"dev", "ops"},
+ CreationTime: time.Now().Unix(),
+ ExplicitMaxTTL: 100,
+ NumUses: 10,
+ }
+ if err := ts.create(ent); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ out, err = ts.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if out.DisplayName != "test-display-name" {
+ t.Fatalf("bad: display_name: expected: test-display-name, actual: %s", out.DisplayName)
+ }
+ if out.CreationTime == 0 {
+ t.Fatal("bad: expected a non-zero creation time")
+ }
+ if out.ExplicitMaxTTL != 100 {
+ t.Fatalf("bad: explicit_max_ttl: expected: 100, actual: %d", out.ExplicitMaxTTL)
+ }
+ if out.NumUses != 10 {
+ t.Fatalf("bad: num_uses: expected: 10, actual: %d", out.NumUses)
+ }
+
+ // Fill in the deprecated fields and read out from proper fields
+ ent = &TokenEntry{
+ Path: "test",
+ Policies: []string{"dev", "ops"},
+ DisplayNameDeprecated: "test-display-name",
+ CreationTimeDeprecated: time.Now().Unix(),
+ ExplicitMaxTTLDeprecated: 100,
+ NumUsesDeprecated: 10,
+ }
+ if err := ts.create(ent); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ out, err = ts.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if out.DisplayName != "test-display-name" {
+ t.Fatalf("bad: display_name: expected: test-display-name, actual: %s", out.DisplayName)
+ }
+ if out.CreationTime == 0 {
+ t.Fatal("bad: expected a non-zero creation time")
+ }
+ if out.ExplicitMaxTTL != 100 {
+ t.Fatalf("bad: explicit_max_ttl: expected: 100, actual: %d", out.ExplicitMaxTTL)
+ }
+ if out.NumUses != 10 {
+ t.Fatalf("bad: num_uses: expected: 10, actual: %d", out.NumUses)
+ }
+
+ // Check if NumUses picks up a lower value
+ ent = &TokenEntry{
+ Path: "test",
+ NumUses: 5,
+ NumUsesDeprecated: 10,
+ }
+ if err := ts.create(ent); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ out, err = ts.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if out.NumUses != 5 {
+ t.Fatalf("bad: num_uses: expected: 5, actual: %d", out.NumUses)
+ }
+
+ // Switch the values from deprecated and proper field and check if the
+ // lower value is still getting picked up
+ ent = &TokenEntry{
+ Path: "test",
+ NumUses: 10,
+ NumUsesDeprecated: 5,
+ }
+ if err := ts.create(ent); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ out, err = ts.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if out.NumUses != 5 {
+ t.Fatalf("bad: num_uses: expected: 5, actual: %d", out.NumUses)
+ }
+}
+
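+// getBackendConfig returns a static backend config for tests, with a 24-hour
+// default lease TTL and a 32-day maximum lease TTL.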
+func getBackendConfig(c *Core) *logical.BackendConfig {
+ return &logical.BackendConfig{
+ Logger: c.logger,
+ System: logical.StaticSystemView{
+ DefaultLeaseTTLVal: time.Hour * 24,
+ MaxLeaseTTLVal: time.Hour * 24 * 32,
+ },
+ }
+}
+
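+// testMakeToken creates a token with the given ID, TTL, and policies directly
+// against the token store, failing the test if creation does not succeed.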
+func testMakeToken(t *testing.T, ts *TokenStore, root, client, ttl string, policy []string) {
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["id"] = client
+ req.Data["policies"] = policy
+ req.Data["ttl"] = ttl
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken != client {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
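+// testCoreMakeToken is the Core-level counterpart of testMakeToken, routing
+// the create request through the mounted auth/token/create path.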
+func testCoreMakeToken(t *testing.T, c *Core, root, client, ttl string, policy []string) {
+ req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/create")
+ req.ClientToken = root
+ req.Data["id"] = client
+ req.Data["policies"] = policy
+ req.Data["ttl"] = ttl
+
+ resp, err := c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken != client {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
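+// TestTokenStore_AccessorIndex verifies that token creation populates the
+// accessor and that the accessor index resolves back to the token ID.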
+func TestTokenStore_AccessorIndex(t *testing.T) {
+ _, ts, _, _ := TestCoreWithTokenStore(t)
+
+ ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
+ if err := ts.create(ent); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ out, err := ts.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Ensure that accessor is created
+ if out == nil || out.Accessor == "" {
+ t.Fatalf("bad: %#v", out)
+ }
+
+ aEntry, err := ts.lookupByAccessor(out.Accessor)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Verify that the value returned from the index matches the token ID
+ if aEntry.TokenID != ent.ID {
+ t.Fatalf("bad: got\n%s\nexpected\n%s\n", aEntry.TokenID, ent.ID)
+ }
+}
+
+func TestTokenStore_HandleRequest_LookupAccessor(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+ testMakeToken(t, ts, root, "tokenid", "", []string{"foo"})
+ out, err := ts.Lookup("tokenid")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if out == nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "lookup-accessor")
+ req.Data = map[string]interface{}{
+ "accessor": out.Accessor,
+ }
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp.Data == nil {
+ t.Fatalf("response should contain data")
+ }
+
+ if resp.Data["accessor"].(string) == "" {
+ t.Fatalf("accessor should not be empty")
+ }
+
+ // Verify that the lookup-accessor operation does not return the token ID
+ if resp.Data["id"].(string) != "" {
+ t.Fatalf("token ID should not be returned")
+ }
+}
+
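+// TestTokenStore_HandleRequest_ListAccessors lists accessors, then rewrites
+// the index entries in the old storage format and confirms the listing and
+// lookups still work through the upgrade path.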
+func TestTokenStore_HandleRequest_ListAccessors(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ testKeys := []string{"token1", "token2", "token3", "token4"}
+ for _, key := range testKeys {
+ testMakeToken(t, ts, root, key, "", []string{"foo"})
+ }
+
+ // Revoke root to make the number of accessors match
+ ts.revokeSalted(ts.SaltID(root))
+
+ req := logical.TestRequest(t, logical.ListOperation, "accessors")
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp.Data == nil {
+ t.Fatalf("response should contain data")
+ }
+ if resp.Data["keys"] == nil {
+ t.Fatalf("keys should not be empty")
+ }
+ keys := resp.Data["keys"].([]string)
+ if len(keys) != len(testKeys) {
+ t.Fatalf("wrong number of accessors found")
+ }
+ if len(resp.Warnings()) != 0 {
+ t.Fatalf("got warnings:\n%#v", resp.Warnings())
+ }
+
+ // Test upgrade from the old accessor storage format, which stored only the token ID
+ for _, accessor := range keys {
+ aEntry, err := ts.lookupByAccessor(accessor)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if aEntry.TokenID == "" || aEntry.AccessorID == "" {
+ t.Fatalf("error, accessor entry looked up is empty, but no error thrown")
+ }
+ path := accessorPrefix + ts.SaltID(accessor)
+ le := &logical.StorageEntry{Key: path, Value: []byte(aEntry.TokenID)}
+ if err := ts.view.Put(le); err != nil {
+ t.Fatalf("failed to persist accessor index entry: %v", err)
+ }
+ }
+
+ // Do the lookup again, should get same result
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if resp.Data == nil {
+ t.Fatalf("response should contain data")
+ }
+ if resp.Data["keys"] == nil {
+ t.Fatalf("keys should not be empty")
+ }
+ keys2 := resp.Data["keys"].([]string)
+ if len(keys2) != len(testKeys) {
+ t.Fatalf("wrong number of accessors found")
+ }
+ if len(resp.Warnings()) != 0 {
+ t.Fatalf("got warnings:\n%#v", resp.Warnings())
+ }
+
+ for _, accessor := range keys2 {
+ aEntry, err := ts.lookupByAccessor(accessor)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if aEntry.TokenID == "" || aEntry.AccessorID == "" {
+ t.Fatalf("error, accessor entry looked up is empty, but no error thrown")
+ }
+ }
+}
+
+func TestTokenStore_HandleRequest_RevokeAccessor(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+ testMakeToken(t, ts, root, "tokenid", "", []string{"foo"})
+ out, err := ts.Lookup("tokenid")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if out == nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "revoke-accessor")
+ req.Data = map[string]interface{}{
+ "accessor": out.Accessor,
+ }
+
+ _, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ out, err = ts.Lookup("tokenid")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if out != nil {
+ t.Fatalf("bad:\ngot %#v\nexpected: nil\n", out)
+ }
+}
+
+func TestTokenStore_RootToken(t *testing.T) {
+ _, ts, _, _ := TestCoreWithTokenStore(t)
+
+ te, err := ts.rootToken()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if te.ID == "" {
+ t.Fatalf("missing ID")
+ }
+
+ out, err := ts.Lookup(te.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, te) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", te, out)
+ }
+}
+
+func TestTokenStore_CreateLookup(t *testing.T) {
+ c, ts, _, _ := TestCoreWithTokenStore(t)
+
+ ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
+ if err := ts.create(ent); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if ent.ID == "" {
+ t.Fatalf("missing ID")
+ }
+
+ out, err := ts.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, ent) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", ent, out)
+ }
+
+ // New store should share the salt
+ ts2, err := NewTokenStore(c, getBackendConfig(c))
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if err := ts2.Initialize(); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should still match
+ out, err = ts2.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, ent) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", ent, out)
+ }
+}
+
+func TestTokenStore_CreateLookup_ProvidedID(t *testing.T) {
+ c, ts, _, _ := TestCoreWithTokenStore(t)
+
+ ent := &TokenEntry{
+ ID: "foobarbaz",
+ Path: "test",
+ Policies: []string{"dev", "ops"},
+ }
+ if err := ts.create(ent); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if ent.ID != "foobarbaz" {
+ t.Fatalf("bad: ent.ID: expected:\"foobarbaz\"\n actual:%s", ent.ID)
+ }
+
+ out, err := ts.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, ent) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", ent, out)
+ }
+
+ // New store should share the salt
+ ts2, err := NewTokenStore(c, getBackendConfig(c))
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if err := ts2.Initialize(); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should still match
+ out, err = ts2.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, ent) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", ent, out)
+ }
+}
+
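+// TestTokenStore_UseToken checks that UseToken is a no-op for unlimited-use
+// tokens, decrements NumUses for restricted tokens, and flags the entry for
+// deferred revocation once the last use is consumed.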
+func TestTokenStore_UseToken(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ // Lookup the root token
+ ent, err := ts.Lookup(root)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Root is an unlimited-use token, so UseToken should be a no-op
+ te, err := ts.UseToken(ent)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if te == nil {
+ t.Fatalf("token entry after use was nil")
+ }
+
+ // Lookup the root token again
+ ent2, err := ts.Lookup(root)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if !reflect.DeepEqual(ent, ent2) {
+ t.Fatalf("bad: ent:%#v ent2:%#v", ent, ent2)
+ }
+
+ // Create a restricted token
+ ent = &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}, NumUses: 2}
+ if err := ts.create(ent); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Use the token
+ te, err = ts.UseToken(ent)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if te == nil {
+ t.Fatalf("token entry for use #1 was nil")
+ }
+
+ // Lookup the token
+ ent2, err = ts.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be reduced
+ if ent2.NumUses != 1 {
+ t.Fatalf("bad: %#v", ent2)
+ }
+
+ // Use the token
+ te, err = ts.UseToken(ent)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if te == nil {
+ t.Fatalf("token entry for use #2 was nil")
+ }
+ if te.NumUses != tokenRevocationDeferred {
+ t.Fatalf("token entry after use #2 did not have revoke flag")
+ }
+ ts.Revoke(te.ID)
+
+ // Lookup the token
+ ent2, err = ts.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be revoked
+ if ent2 != nil {
+ t.Fatalf("bad: %#v", ent2)
+ }
+}
+
+func TestTokenStore_Revoke(t *testing.T) {
+ _, ts, _, _ := TestCoreWithTokenStore(t)
+
+ ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
+ if err := ts.create(ent); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ err := ts.Revoke("")
+ if err.Error() != "cannot revoke blank token" {
+ t.Fatalf("err: %v", err)
+ }
+ err = ts.Revoke(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ out, err := ts.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %#v", out)
+ }
+}
+
+func TestTokenStore_Revoke_Leases(t *testing.T) {
+ c, ts, _, _ := TestCoreWithTokenStore(t)
+
+ view := NewBarrierView(c.barrier, "noop/")
+
+ // Mount a noop backend
+ noop := &NoopBackend{}
+ ts.expiration.router.Mount(noop, "", &MountEntry{UUID: ""}, view)
+
+ ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
+ if err := ts.create(ent); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Register a lease
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "secret/foo",
+ ClientToken: ent.ID,
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 20 * time.Millisecond,
+ },
+ },
+ Data: map[string]interface{}{
+ "access_key": "xyz",
+ "secret_key": "abcd",
+ },
+ }
+ leaseID, err := ts.expiration.Register(req, resp)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Revoke the token
+ err = ts.Revoke(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Verify the lease is gone
+ out, err := ts.expiration.loadEntry(leaseID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %#v", out)
+ }
+}
+
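+// TestTokenStore_Revoke_Orphan verifies that plain Revoke (as opposed to
+// RevokeTree) leaves child tokens in place.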
+func TestTokenStore_Revoke_Orphan(t *testing.T) {
+ _, ts, _, _ := TestCoreWithTokenStore(t)
+
+ ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
+ if err := ts.create(ent); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ ent2 := &TokenEntry{Parent: ent.ID}
+ if err := ts.create(ent2); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ err := ts.Revoke(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ out, err := ts.Lookup(ent2.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, ent2) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", ent2, out)
+ }
+}
+
+func TestTokenStore_RevokeTree(t *testing.T) {
+ _, ts, _, _ := TestCoreWithTokenStore(t)
+
+ ent1 := &TokenEntry{}
+ if err := ts.create(ent1); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ ent2 := &TokenEntry{Parent: ent1.ID}
+ if err := ts.create(ent2); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ ent3 := &TokenEntry{Parent: ent2.ID}
+ if err := ts.create(ent3); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ ent4 := &TokenEntry{Parent: ent2.ID}
+ if err := ts.create(ent4); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ err := ts.RevokeTree("")
+ if err.Error() != "cannot tree-revoke blank token" {
+ t.Fatalf("err: %v", err)
+ }
+ err = ts.RevokeTree(ent1.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ lookup := []string{ent1.ID, ent2.ID, ent3.ID, ent4.ID}
+ for _, id := range lookup {
+ out, err := ts.Lookup(id)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %#v", out)
+ }
+ }
+}
+
+func TestTokenStore_RevokeSelf(t *testing.T) {
+ _, ts, _, _ := TestCoreWithTokenStore(t)
+
+ ent1 := &TokenEntry{}
+ if err := ts.create(ent1); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ ent2 := &TokenEntry{Parent: ent1.ID}
+ if err := ts.create(ent2); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ ent3 := &TokenEntry{Parent: ent2.ID}
+ if err := ts.create(ent3); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ ent4 := &TokenEntry{Parent: ent2.ID}
+ if err := ts.create(ent4); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "revoke-self")
+ req.ClientToken = ent1.ID
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ lookup := []string{ent1.ID, ent2.ID, ent3.ID, ent4.ID}
+ for _, id := range lookup {
+ out, err := ts.Lookup(id)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %#v", out)
+ }
+ }
+}
+
+func TestTokenStore_HandleRequest_NonAssignable(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["policies"] = []string{"default", "foo"}
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ req.Data["policies"] = []string{"default", "foo", responseWrappingPolicyName}
+
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("got a nil response")
+ }
+ if !resp.IsError() {
+ t.Fatalf("expected error; response is %#v", *resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_DisplayName(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["display_name"] = "foo_bar.baz!"
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ expected := &TokenEntry{
+ ID: resp.Auth.ClientToken,
+ Accessor: resp.Auth.Accessor,
+ Parent: root,
+ Policies: []string{"root"},
+ Path: "auth/token/create",
+ DisplayName: "token-foo-bar-baz",
+ TTL: 0,
+ }
+ out, err := ts.Lookup(resp.Auth.ClientToken)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ expected.CreationTime = out.CreationTime
+ if !reflect.DeepEqual(out, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, out)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_NumUses(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["num_uses"] = "1"
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ expected := &TokenEntry{
+ ID: resp.Auth.ClientToken,
+ Accessor: resp.Auth.Accessor,
+ Parent: root,
+ Policies: []string{"root"},
+ Path: "auth/token/create",
+ DisplayName: "token",
+ NumUses: 1,
+ TTL: 0,
+ }
+ out, err := ts.Lookup(resp.Auth.ClientToken)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ expected.CreationTime = out.CreationTime
+ if !reflect.DeepEqual(out, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, out)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_NumUses_Invalid(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["num_uses"] = "-1"
+
+ resp, err := ts.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_NumUses_Restricted(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["num_uses"] = "1"
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ // We should NOT be able to use the restricted token to create a new token
+ req.ClientToken = resp.Auth.ClientToken
+ _, err = ts.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_NoPolicy(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ expected := &TokenEntry{
+ ID: resp.Auth.ClientToken,
+ Accessor: resp.Auth.Accessor,
+ Parent: root,
+ Policies: []string{"root"},
+ Path: "auth/token/create",
+ DisplayName: "token",
+ TTL: 0,
+ }
+ out, err := ts.Lookup(resp.Auth.ClientToken)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ expected.CreationTime = out.CreationTime
+ if !reflect.DeepEqual(out, expected) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, out)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_BadParent(t *testing.T) {
+ _, ts, _, _ := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = "random"
+
+ resp, err := ts.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Data["error"] != "parent token lookup failed" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["policies"] = []string{"foo"}
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_RootID(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["id"] = "foobar"
+ req.Data["policies"] = []string{"foo"}
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken != "foobar" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_NonRootID(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+ testMakeToken(t, ts, root, "client", "", []string{"foo"})
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = "client"
+ req.Data["id"] = "foobar"
+ req.Data["policies"] = []string{"foo"}
+
+ resp, err := ts.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Data["error"] != "root or sudo privileges required to specify token id" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_NonRoot_Subset(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+ testMakeToken(t, ts, root, "client", "", []string{"foo", "bar"})
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = "client"
+ req.Data["policies"] = []string{"foo"}
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_NonRoot_InvalidSubset(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+ testMakeToken(t, ts, root, "client", "", []string{"foo", "bar"})
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = "client"
+ req.Data["policies"] = []string{"foo", "bar", "baz"}
+
+ resp, err := ts.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Data["error"] != "child policies must be subset of parent" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_NonRoot_RootChild(t *testing.T) {
+ core, ts, _, root := TestCoreWithTokenStore(t)
+ ps := core.policyStore
+
+ policy, _ := Parse(tokenCreationPolicy)
+ policy.Name = "test1"
+ if err := ps.SetPolicy(policy); err != nil {
+ t.Fatal(err)
+ }
+
+ testMakeToken(t, ts, root, "sudoClient", "", []string{"test1"})
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = "sudoClient"
+ req.MountPoint = "auth/token/"
+ req.Data["policies"] = []string{"root"}
+
+ resp, err := ts.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v; resp: %#v", err, resp)
+ }
+ if resp == nil || resp.Data == nil {
+ t.Fatalf("expected a response")
+ }
+ if resp.Data["error"].(string) != "root tokens may not be created without parent token being root" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_Root_RootChild_NoExpiry_Expiry(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "ttl": "5m",
+ }
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v; resp: %#v", err, resp)
+ }
+ if resp == nil || resp.Auth == nil {
+ t.Fatalf("failed to create a root token using another root token")
+ }
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"root"}) {
+ t.Fatalf("bad: policies: expected: root; actual: %s", resp.Auth.Policies)
+ }
+ if resp.Auth.TTL.Seconds() != 300 {
+ t.Fatalf("bad: expected 300 second ttl, got %v", resp.Auth.TTL.Seconds())
+ }
+
+ req.ClientToken = resp.Auth.ClientToken
+ req.Data = map[string]interface{}{
+ "ttl": "0",
+ }
+ resp, err = ts.HandleRequest(req)
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_Root_RootChild(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v; resp: %#v", err, resp)
+ }
+ if resp == nil || resp.Auth == nil {
+ t.Fatalf("failed to create a root token using another root token")
+ }
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"root"}) {
+ t.Fatalf("bad: policies: expected: root; actual: %s", resp.Auth.Policies)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_NonRoot_NoParent(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+ testMakeToken(t, ts, root, "client", "", []string{"foo"})
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = "client"
+ req.Data["no_parent"] = true
+ req.Data["policies"] = []string{"foo"}
+
+ resp, err := ts.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Data["error"] != "root or sudo privileges required to create orphan token" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_Root_NoParent(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["no_parent"] = true
+ req.Data["policies"] = []string{"foo"}
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ out, _ := ts.Lookup(resp.Auth.ClientToken)
+ if out.Parent != "" {
+ t.Fatalf("bad: %#v", out)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_PathBased_NoParent(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create-orphan")
+ req.ClientToken = root
+ req.Data["policies"] = []string{"foo"}
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ out, _ := ts.Lookup(resp.Auth.ClientToken)
+ if out.Parent != "" {
+ t.Fatalf("bad: %#v", out)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_Metadata(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["policies"] = []string{"foo"}
+ meta := map[string]string{
+ "user": "armon",
+ "source": "github",
+ }
+ req.Data["meta"] = meta
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ out, _ := ts.Lookup(resp.Auth.ClientToken)
+ if !reflect.DeepEqual(out.Meta, meta) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", meta, out.Meta)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_Lease(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["policies"] = []string{"foo"}
+ req.Data["lease"] = "1h"
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+ if resp.Auth.TTL != time.Hour {
+ t.Fatalf("bad: %#v", resp)
+ }
+ if !resp.Auth.Renewable {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_CreateToken_TTL(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["policies"] = []string{"foo"}
+ req.Data["ttl"] = "1h"
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+ if resp.Auth.TTL != time.Hour {
+ t.Fatalf("bad: %#v", resp)
+ }
+ if !resp.Auth.Renewable {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestTokenStore_HandleRequest_Revoke(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+ testMakeToken(t, ts, root, "child", "", []string{"root", "foo"})
+ testMakeToken(t, ts, "child", "sub-child", "", []string{"foo"})
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "revoke")
+ req.Data = map[string]interface{}{
+ "token": "child",
+ }
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ out, err := ts.Lookup("child")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Sub-child should not exist
+ out, err = ts.Lookup("sub-child")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+}
+
+func TestTokenStore_HandleRequest_RevokeOrphan(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+ testMakeToken(t, ts, root, "child", "", []string{"root", "foo"})
+ testMakeToken(t, ts, "child", "sub-child", "", []string{"foo"})
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "revoke-orphan")
+ req.Data = map[string]interface{}{
+ "token": "child",
+ }
+ req.ClientToken = root
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ out, err := ts.Lookup("child")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Sub-child should exist!
+ out, err = ts.Lookup("sub-child")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("bad: %v", out)
+ }
+}
+
+func TestTokenStore_HandleRequest_RevokeOrphan_NonRoot(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+ testMakeToken(t, ts, root, "child", "", []string{"foo"})
+
+ out, err := ts.Lookup("child")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "revoke-orphan")
+ req.Data = map[string]interface{}{
+ "token": "child",
+ }
+ req.ClientToken = "child"
+ resp, err := ts.HandleRequest(req)
+ if err != logical.ErrInvalidRequest {
+ t.Fatalf("did not get error when non-root revoking itself with orphan flag; resp is %#v", resp)
+ }
+
+ // Should still exist
+ out, err = ts.Lookup("child")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("bad: %v", out)
+ }
+}
+
+func TestTokenStore_HandleRequest_Lookup(t *testing.T) {
+ c, ts, _, root := TestCoreWithTokenStore(t)
+ req := logical.TestRequest(t, logical.UpdateOperation, "lookup")
+ req.Data = map[string]interface{}{
+ "token": root,
+ }
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ exp := map[string]interface{}{
+ "id": root,
+ "accessor": resp.Data["accessor"].(string),
+ "policies": []string{"root"},
+ "path": "auth/token/root",
+ "meta": map[string]string(nil),
+ "display_name": "root",
+ "orphan": true,
+ "num_uses": 0,
+ "creation_ttl": int64(0),
+ "ttl": int64(0),
+ "explicit_max_ttl": int64(0),
+ "expire_time": nil,
+ }
+
+ if resp.Data["creation_time"].(int64) == 0 {
+ t.Fatalf("creation time was zero")
+ }
+ delete(resp.Data, "creation_time")
+
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", exp, resp.Data)
+ }
+
+ testCoreMakeToken(t, c, root, "client", "3600s", []string{"foo"})
+
+ // Look up the newly created token
+ req = logical.TestRequest(t, logical.UpdateOperation, "lookup")
+ req.Data = map[string]interface{}{
+ "token": "client",
+ }
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ exp = map[string]interface{}{
+ "id": "client",
+ "accessor": resp.Data["accessor"],
+ "policies": []string{"default", "foo"},
+ "path": "auth/token/create",
+ "meta": map[string]string(nil),
+ "display_name": "token",
+ "orphan": false,
+ "num_uses": 0,
+ "creation_ttl": int64(3600),
+ "ttl": int64(3600),
+ "explicit_max_ttl": int64(0),
+ "renewable": true,
+ }
+
+ if resp.Data["creation_time"].(int64) == 0 {
+ t.Fatalf("creation time was zero")
+ }
+ delete(resp.Data, "creation_time")
+ if resp.Data["issue_time"].(time.Time).IsZero() {
+ t.Fatal("issue time is default time")
+ }
+ delete(resp.Data, "issue_time")
+ if resp.Data["expire_time"].(time.Time).IsZero() {
+ t.Fatal("expire time is default time")
+ }
+ delete(resp.Data, "expire_time")
+
+ // Depending on timing of the test this may have ticked down, so accept 3599
+ if resp.Data["ttl"].(int64) == 3599 {
+ resp.Data["ttl"] = int64(3600)
+ }
+
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", exp, resp.Data)
+ }
+
+ // Repeat the lookup and expect identical results
+ req = logical.TestRequest(t, logical.UpdateOperation, "lookup")
+ req.Data = map[string]interface{}{
+ "token": "client",
+ }
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ exp = map[string]interface{}{
+ "id": "client",
+ "accessor": resp.Data["accessor"],
+ "policies": []string{"default", "foo"},
+ "path": "auth/token/create",
+ "meta": map[string]string(nil),
+ "display_name": "token",
+ "orphan": false,
+ "num_uses": 0,
+ "creation_ttl": int64(3600),
+ "ttl": int64(3600),
+ "explicit_max_ttl": int64(0),
+ "renewable": true,
+ }
+
+ if resp.Data["creation_time"].(int64) == 0 {
+ t.Fatalf("creation time was zero")
+ }
+ delete(resp.Data, "creation_time")
+ if resp.Data["issue_time"].(time.Time).IsZero() {
+ t.Fatal("issue time is default time")
+ }
+ delete(resp.Data, "issue_time")
+ if resp.Data["expire_time"].(time.Time).IsZero() {
+ t.Fatal("expire time is default time")
+ }
+ delete(resp.Data, "expire_time")
+
+ // Depending on timing of the test this may have ticked down, so accept 3599
+ if resp.Data["ttl"].(int64) == 3599 {
+ resp.Data["ttl"] = int64(3600)
+ }
+
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", exp, resp.Data)
+ }
+
+ // Test last_renewal_time functionality
+ req = logical.TestRequest(t, logical.UpdateOperation, "renew")
+ req.Data = map[string]interface{}{
+ "token": "client",
+ }
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.UpdateOperation, "lookup")
+ req.Data = map[string]interface{}{
+ "token": "client",
+ }
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ if resp.Data["last_renewal_time"].(int64) == 0 {
+ t.Fatalf("last_renewal_time was zero")
+ }
+}
+
+func TestTokenStore_HandleRequest_LookupSelf(t *testing.T) {
+ c, ts, _, root := TestCoreWithTokenStore(t)
+ testCoreMakeToken(t, c, root, "client", "3600s", []string{"foo"})
+
+ req := logical.TestRequest(t, logical.ReadOperation, "lookup-self")
+ req.ClientToken = "client"
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ exp := map[string]interface{}{
+ "id": "client",
+ "accessor": resp.Data["accessor"],
+ "policies": []string{"default", "foo"},
+ "path": "auth/token/create",
+ "meta": map[string]string(nil),
+ "display_name": "token",
+ "orphan": false,
+ "renewable": true,
+ "num_uses": 0,
+ "creation_ttl": int64(3600),
+ "ttl": int64(3600),
+ "explicit_max_ttl": int64(0),
+ }
+
+ if resp.Data["creation_time"].(int64) == 0 {
+ t.Fatalf("creation time was zero")
+ }
+ delete(resp.Data, "creation_time")
+ if resp.Data["issue_time"].(time.Time).IsZero() {
+ t.Fatalf("creation time was zero")
+ }
+ delete(resp.Data, "issue_time")
+ if resp.Data["expire_time"].(time.Time).IsZero() {
+ t.Fatalf("expire time was zero")
+ }
+ delete(resp.Data, "expire_time")
+
+ // Depending on timing of the test this may have ticked down, so accept 3599
+ if resp.Data["ttl"].(int64) == 3599 {
+ resp.Data["ttl"] = int64(3600)
+ }
+
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", exp, resp.Data)
+ }
+}
+
+func TestTokenStore_HandleRequest_Renew(t *testing.T) {
+ exp := mockExpiration(t)
+ ts := exp.tokenStore
+
+ // Create a root token
+ root, err := ts.rootToken()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Register the root token with the expiration manager
+ auth := &logical.Auth{
+ ClientToken: root.ID,
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ Renewable: true,
+ },
+ }
+ err = exp.RegisterAuth("auth/token/root", auth)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get the original expire time to compare
+ originalExpire := auth.ExpirationTime()
+
+ beforeRenew := time.Now()
+ req := logical.TestRequest(t, logical.UpdateOperation, "renew")
+ req.Data = map[string]interface{}{
+ "token": root.ID,
+ "increment": "3600s",
+ }
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ // Get the new expire time
+ newExpire := resp.Auth.ExpirationTime()
+ if newExpire.Before(originalExpire) {
+ t.Fatalf("should expire later: %s %s", newExpire, originalExpire)
+ }
+ if newExpire.Before(beforeRenew.Add(time.Hour)) {
+ t.Fatalf("should have at least an hour: %s %s", newExpire, beforeRenew)
+ }
+}
+
+func TestTokenStore_HandleRequest_RenewSelf(t *testing.T) {
+ exp := mockExpiration(t)
+ ts := exp.tokenStore
+
+ // Create a root token
+ root, err := ts.rootToken()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Register the root token with the expiration manager
+ auth := &logical.Auth{
+ ClientToken: root.ID,
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ Renewable: true,
+ },
+ }
+ err = exp.RegisterAuth("auth/token/root", auth)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get the original expire time to compare
+ originalExpire := auth.ExpirationTime()
+
+ beforeRenew := time.Now()
+ req := logical.TestRequest(t, logical.UpdateOperation, "renew-self")
+ req.ClientToken = auth.ClientToken
+ req.Data["increment"] = "3600s"
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ // Get the new expire time
+ newExpire := resp.Auth.ExpirationTime()
+ if newExpire.Before(originalExpire) {
+ t.Fatalf("should expire later: %s %s", newExpire, originalExpire)
+ }
+ if newExpire.Before(beforeRenew.Add(time.Hour)) {
+ t.Fatalf("should have at least an hour: %s %s", newExpire, beforeRenew)
+ }
+}
+
+func TestTokenStore_RoleCRUD(t *testing.T) {
+ core, _, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.ReadOperation, "auth/token/roles/test")
+ req.ClientToken = root
+
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("should not see a role")
+ }
+
+ // First test creation
+ req.Operation = logical.CreateOperation
+ req.Data = map[string]interface{}{
+ "orphan": true,
+ "period": "72h",
+ "allowed_policies": "test1,test2",
+ "path_suffix": "happenin",
+ }
+
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("expected a nil response")
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Data = map[string]interface{}{}
+
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatalf("got a nil response")
+ }
+
+ expected := map[string]interface{}{
+ "name": "test",
+ "orphan": true,
+ "period": int64(259200),
+ "allowed_policies": []string{"test1", "test2"},
+ "disallowed_policies": []string{},
+ "path_suffix": "happenin",
+ "explicit_max_ttl": int64(0),
+ "renewable": true,
+ }
+
+ if !reflect.DeepEqual(expected, resp.Data) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, resp.Data)
+ }
+
+ // Now test updating; this should be set to an UpdateOperation
+ // automatically due to the existence check
+ req.Operation = logical.CreateOperation
+ req.Data = map[string]interface{}{
+ "period": "79h",
+ "allowed_policies": "test3",
+ "path_suffix": "happenin",
+ "renewable": false,
+ }
+
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("expected a nil response")
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Data = map[string]interface{}{}
+
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatalf("got a nil response")
+ }
+
+ expected = map[string]interface{}{
+ "name": "test",
+ "orphan": true,
+ "period": int64(284400),
+ "allowed_policies": []string{"test3"},
+ "disallowed_policies": []string{},
+ "path_suffix": "happenin",
+ "explicit_max_ttl": int64(0),
+ "renewable": false,
+ }
+
+ if !reflect.DeepEqual(expected, resp.Data) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, resp.Data)
+ }
+
+ // Now set explicit max ttl and clear the period
+ req.Operation = logical.CreateOperation
+ req.Data = map[string]interface{}{
+ "explicit_max_ttl": "5",
+ "period": "0s",
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Data = map[string]interface{}{}
+
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatalf("got a nil response")
+ }
+
+ expected = map[string]interface{}{
+ "name": "test",
+ "orphan": true,
+ "explicit_max_ttl": int64(5),
+ "allowed_policies": []string{"test3"},
+ "disallowed_policies": []string{},
+ "path_suffix": "happenin",
+ "period": int64(0),
+ "renewable": false,
+ }
+
+ if !reflect.DeepEqual(expected, resp.Data) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", expected, resp.Data)
+ }
+
+ req.Operation = logical.ListOperation
+ req.Path = "auth/token/roles"
+ req.Data = map[string]interface{}{}
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatalf("got a nil response")
+ }
+ keysInt, ok := resp.Data["keys"]
+ if !ok {
+ t.Fatalf("did not find keys in response")
+ }
+ keys, ok := keysInt.([]string)
+ if !ok {
+ t.Fatalf("could not convert keys interface to key list")
+ }
+ if len(keys) != 1 {
+ t.Fatalf("unexpected number of keys: %d", len(keys))
+ }
+ if keys[0] != "test" {
+ t.Fatalf("expected \"test\", got \"%s\"", keys[0])
+ }
+
+ req.Operation = logical.DeleteOperation
+ req.Path = "auth/token/roles/test"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("expected a nil response")
+ }
+
+ req.Operation = logical.ReadOperation
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("expected a nil response")
+ }
+}
+
+func TestTokenStore_RoleDisallowedPoliciesWithRoot(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ // Set disallowed_policies to include the root policy and verify that a read on the role returns it verbatim.
+ roleReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/role1",
+ Data: map[string]interface{}{
+ "disallowed_policies": "root,testpolicy",
+ },
+ ClientToken: root,
+ }
+ resp, err = ts.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ roleReq.Operation = logical.ReadOperation
+ resp, err = ts.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ expected := []string{"root", "testpolicy"}
+ if !reflect.DeepEqual(resp.Data["disallowed_policies"], expected) {
+ t.Fatalf("bad: expected: %#v, actual: %#v", expected, resp.Data["disallowed_policies"])
+ }
+}
+
+func TestTokenStore_RoleDisallowedPolicies(t *testing.T) {
+ var req *logical.Request
+ var resp *logical.Response
+ var err error
+
+ core, ts, _, root := TestCoreWithTokenStore(t)
+ ps := core.policyStore
+
+ // Create 3 different policies
+ policy, _ := Parse(tokenCreationPolicy)
+ policy.Name = "test1"
+ if err := ps.SetPolicy(policy); err != nil {
+ t.Fatal(err)
+ }
+
+ policy, _ = Parse(tokenCreationPolicy)
+ policy.Name = "test2"
+ if err := ps.SetPolicy(policy); err != nil {
+ t.Fatal(err)
+ }
+
+ policy, _ = Parse(tokenCreationPolicy)
+ policy.Name = "test3"
+ if err := ps.SetPolicy(policy); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create roles with different disallowed_policies configuration
+ req = logical.TestRequest(t, logical.UpdateOperation, "roles/test1")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "disallowed_policies": "test1",
+ }
+ resp, err = ts.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ req = logical.TestRequest(t, logical.UpdateOperation, "roles/test23")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "disallowed_policies": "test2,test3",
+ }
+ resp, err = ts.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ req = logical.TestRequest(t, logical.UpdateOperation, "roles/test123")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "disallowed_policies": "test1,test2,test3",
+ }
+ resp, err = ts.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ // Create a token that has all the policies defined above
+ req = logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["policies"] = []string{"test1", "test2", "test3"}
+ resp, err = ts.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+ if resp == nil || resp.Auth == nil {
+ t.Fatal("got nil response")
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: ClientToken; resp:%#v", resp)
+ }
+ parentToken := resp.Auth.ClientToken
+
+ req = logical.TestRequest(t, logical.UpdateOperation, "create/test1")
+ req.ClientToken = parentToken
+ resp, err = ts.HandleRequest(req)
+ if err == nil || resp != nil && !resp.IsError() {
+ t.Fatalf("expected an error response, got %#v", resp)
+ }
+
+ req = logical.TestRequest(t, logical.UpdateOperation, "create/test23")
+ req.ClientToken = parentToken
+ resp, err = ts.HandleRequest(req)
+ if err == nil || resp != nil && !resp.IsError() {
+ t.Fatal("expected an error response")
+ }
+
+ req = logical.TestRequest(t, logical.UpdateOperation, "create/test123")
+ req.ClientToken = parentToken
+ resp, err = ts.HandleRequest(req)
+ if err == nil || resp != nil && !resp.IsError() {
+ t.Fatal("expected an error response")
+ }
+
+ // Disallowed policies should act as a blacklist, so make sure we can still
+ // create a token with other policies in the request
+ req = logical.TestRequest(t, logical.UpdateOperation, "create/test123")
+ req.Data["policies"] = []string{"foo", "bar"}
+ req.ClientToken = parentToken
+ resp, err = ts.HandleRequest(req)
+ if err != nil || resp == nil || resp.IsError() {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ // Create a role to have 'default' policy disallowed
+ req = logical.TestRequest(t, logical.UpdateOperation, "roles/default")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "disallowed_policies": "default",
+ }
+ resp, err = ts.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ req = logical.TestRequest(t, logical.UpdateOperation, "create/default")
+ req.ClientToken = parentToken
+ resp, err = ts.HandleRequest(req)
+ if err == nil || resp != nil && !resp.IsError() {
+ t.Fatal("expected an error response")
+ }
+}
+
+func TestTokenStore_RoleAllowedPolicies(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "roles/test")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "allowed_policies": "test1,test2",
+ }
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("expected a nil response")
+ }
+
+ req.Data = map[string]interface{}{}
+
+ req.Path = "create/test"
+ req.Data["policies"] = []string{"foo"}
+ resp, err = ts.HandleRequest(req)
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ req.Data["policies"] = []string{"test2"}
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // When allowed_policies is blank, the normal semantics apply: requested policies must be a subset of the parent token's policies
+ req = logical.TestRequest(t, logical.UpdateOperation, "roles/test")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "allowed_policies": "",
+ }
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("expected a nil response")
+ }
+
+ req = logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root
+ req.Data["policies"] = []string{"test1", "test2", "test3"}
+ resp, err = ts.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+ if resp == nil || resp.Auth == nil {
+ t.Fatal("got nil response")
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: ClientToken; resp:%#v", resp)
+ }
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"default", "test1", "test2", "test3"}) {
+ t.Fatalf("bad: %#v", resp.Auth.Policies)
+ }
+ parentToken := resp.Auth.ClientToken
+
+ req.Data = map[string]interface{}{}
+ req.ClientToken = parentToken
+
+ req.Path = "create/test"
+ req.Data["policies"] = []string{"foo"}
+ resp, err = ts.HandleRequest(req)
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ req.Data["policies"] = []string{"test2"}
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ delete(req.Data, "policies")
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"default", "test1", "test2", "test3"}) {
+ t.Fatalf("bad: %#v", resp.Auth.Policies)
+ }
+}
+
+func TestTokenStore_RoleOrphan(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "roles/test")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "orphan": true,
+ }
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("expected a nil response")
+ }
+
+ req.Path = "create/test"
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ out, err := ts.Lookup(resp.Auth.ClientToken)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if out.Parent != "" {
+ t.Fatalf("expected orphan token, but found a parent")
+ }
+
+ if !strings.HasPrefix(out.Path, "auth/token/create/test") {
+ t.Fatalf("expected role in path but did not find it")
+ }
+}
+
+func TestTokenStore_RolePathSuffix(t *testing.T) {
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "roles/test")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "path_suffix": "happenin",
+ }
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("expected a nil response")
+ }
+
+ req.Path = "create/test"
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ out, err := ts.Lookup(resp.Auth.ClientToken)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if out.Path != "auth/token/create/test/happenin" {
+ t.Fatalf("expected role in path but did not find it")
+ }
+}
+
+func TestTokenStore_RolePeriod(t *testing.T) {
+ core, _, _, root := TestCoreWithTokenStore(t)
+
+ core.defaultLeaseTTL = 10 * time.Second
+ core.maxLeaseTTL = 10 * time.Second
+
+ // Note: these requests are sent to Core since Core handles registration
+ // with the expiration manager and we need the storage to be consistent
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/roles/test")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "period": 300,
+ }
+
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("expected a nil response")
+ }
+
+	// This first set of logic is to verify that a normal non-root token will
+	// be given a TTL of 10 seconds, and that renewing will not cause the TTL
+	// to increase since that's the configured backend max. Then we verify
+	// that an explicit increment does not push the TTL past that max either.
+ {
+ req.Path = "auth/token/create"
+ req.Data = map[string]interface{}{
+ "policies": []string{"default"},
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req.ClientToken = resp.Auth.ClientToken
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl := resp.Data["ttl"].(int64)
+ if ttl > 10 {
+ t.Fatalf("TTL too large")
+ }
+
+ // Let the TTL go down a bit to 8 seconds
+ time.Sleep(2 * time.Second)
+
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/renew-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl = resp.Data["ttl"].(int64)
+ if ttl > 8 {
+ t.Fatalf("TTL too large")
+ }
+
+		// Renewing with an increment should not increase the TTL since
+		// we've already hit the max
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/renew-self"
+ req.Data = map[string]interface{}{
+ "increment": 1,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl = resp.Data["ttl"].(int64)
+ if ttl > 8 {
+ t.Fatalf("TTL too large")
+ }
+ }
+
+ // Now we create a token against the role. We should be able to renew;
+ // increment should be ignored as well.
+ {
+ req.ClientToken = root
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/create/test"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatal("response was nil")
+ }
+ if resp.Auth == nil {
+			t.Fatalf("response auth was nil, resp is %#v", *resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req.ClientToken = resp.Auth.ClientToken
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl := resp.Data["ttl"].(int64)
+ if ttl < 299 {
+			t.Fatalf("TTL too small (expected %d, got %d)", 299, ttl)
+ }
+
+		// Let the TTL tick down for 3 seconds
+ time.Sleep(3 * time.Second)
+
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/renew-self"
+ req.Data = map[string]interface{}{
+ "increment": 1,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl = resp.Data["ttl"].(int64)
+ if ttl < 299 {
+			t.Fatalf("TTL too small (expected %d, got %d)", 299, ttl)
+ }
+ }
+}
+
+func TestTokenStore_RoleExplicitMaxTTL(t *testing.T) {
+ core, _, _, root := TestCoreWithTokenStore(t)
+
+ core.defaultLeaseTTL = 5 * time.Second
+ core.maxLeaseTTL = 5 * time.Hour
+
+ // Note: these requests are sent to Core since Core handles registration
+ // with the expiration manager and we need the storage to be consistent
+
+	// Make sure we can't make it larger than the system/mount max; we should get a warning on role write and on token creation
+ req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/roles/test")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "explicit_max_ttl": "100h",
+ }
+
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatalf("expected a warning")
+ }
+
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/create/test"
+ resp, err = core.HandleRequest(req)
+	if err != nil {
+		t.Fatalf("err: %v %v", err, resp)
+	}
+ if len(resp.Warnings()) == 0 {
+ t.Fatalf("expected a warning")
+ }
+
+ // Reset to a good explicit max
+ req = logical.TestRequest(t, logical.UpdateOperation, "auth/token/roles/test")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "explicit_max_ttl": "10s",
+ }
+
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("expected a nil response")
+ }
+
+ // This first set of logic is to verify that a normal non-root token will
+ // be given a TTL of 5 seconds, and that renewing will cause the TTL to
+ // increase
+ {
+ req.Path = "auth/token/create"
+ req.Data = map[string]interface{}{
+ "policies": []string{"default"},
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req.ClientToken = resp.Auth.ClientToken
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl := resp.Data["ttl"].(int64)
+ if ttl > 5 {
+ t.Fatalf("TTL too large")
+ }
+
+ // Let the TTL go down a bit to 3 seconds
+ time.Sleep(2 * time.Second)
+
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/renew-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl = resp.Data["ttl"].(int64)
+ if ttl < 4 {
+ t.Fatalf("TTL too small after renewal")
+ }
+ }
+
+ // Now we create a token against the role. After renew our max should still
+ // be the same.
+ {
+ req.ClientToken = root
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/create/test"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatal("response was nil")
+ }
+ if resp.Auth == nil {
+			t.Fatalf("response auth was nil, resp is %#v", *resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req.ClientToken = resp.Auth.ClientToken
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl := resp.Data["ttl"].(int64)
+ if ttl > 10 {
+ t.Fatalf("TTL too big")
+ }
+ maxTTL := resp.Data["explicit_max_ttl"].(int64)
+ if maxTTL != 10 {
+			t.Fatalf("expected 10 for explicit max TTL, got %d", maxTTL)
+ }
+
+ // Let the TTL go down a bit to ~7 seconds (8 against explicit max)
+ time.Sleep(2 * time.Second)
+
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/renew-self"
+ req.Data = map[string]interface{}{
+ "increment": 300,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl = resp.Data["ttl"].(int64)
+ if ttl > 8 {
+ t.Fatalf("TTL too big")
+ }
+
+ // Let the TTL go down a bit more to ~5 seconds (6 against explicit max)
+ time.Sleep(2 * time.Second)
+
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/renew-self"
+ req.Data = map[string]interface{}{
+ "increment": 300,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl = resp.Data["ttl"].(int64)
+ if ttl > 6 {
+ t.Fatalf("TTL too big")
+ }
+
+ // It should expire
+ time.Sleep(8 * time.Second)
+
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/renew-self"
+ req.Data = map[string]interface{}{
+ "increment": 300,
+ }
+ resp, err = core.HandleRequest(req)
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+ }
+}
+
+func TestTokenStore_Periodic(t *testing.T) {
+ core, _, _, root := TestCoreWithTokenStore(t)
+
+ core.defaultLeaseTTL = 10 * time.Second
+ core.maxLeaseTTL = 10 * time.Second
+
+ // Note: these requests are sent to Core since Core handles registration
+ // with the expiration manager and we need the storage to be consistent
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/roles/test")
+ req.ClientToken = root
+ req.Data = map[string]interface{}{
+ "period": 300,
+ }
+
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp != nil {
+ t.Fatalf("expected a nil response")
+ }
+
+	// First create a token directly and verify that on renew it uses the period.
+ {
+ req.ClientToken = root
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/create"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("response was nil")
+ }
+ if resp.Auth == nil {
+			t.Fatalf("response auth was nil, resp is %#v", *resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req.ClientToken = resp.Auth.ClientToken
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl := resp.Data["ttl"].(int64)
+ if ttl < 299 {
+ t.Fatalf("TTL too small (expected %d, got %d)", 299, ttl)
+ }
+
+ // Let the TTL go down a bit
+ time.Sleep(2 * time.Second)
+
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/renew-self"
+ req.Data = map[string]interface{}{
+ "increment": 1,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl = resp.Data["ttl"].(int64)
+ if ttl < 299 {
+ t.Fatalf("TTL too small (expected %d, got %d)", 299, ttl)
+ }
+ }
+
+ // Do the same with an explicit max TTL
+ {
+ req.ClientToken = root
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/create"
+ req.Data = map[string]interface{}{
+ "period": 300,
+ "explicit_max_ttl": 150,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("response was nil")
+ }
+ if resp.Auth == nil {
+			t.Fatalf("response auth was nil, resp is %#v", *resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req.ClientToken = resp.Auth.ClientToken
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl := resp.Data["ttl"].(int64)
+ if ttl < 149 || ttl > 150 {
+ t.Fatalf("TTL bad (expected %d, got %d)", 149, ttl)
+ }
+
+ // Let the TTL go down a bit
+ time.Sleep(2 * time.Second)
+
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/renew-self"
+ req.Data = map[string]interface{}{
+ "increment": 76,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl = resp.Data["ttl"].(int64)
+ if ttl < 140 || ttl > 150 {
+ t.Fatalf("TTL bad (expected around %d, got %d)", 145, ttl)
+ }
+ }
+
+	// Now we create a token against the role and also set the period value
+	// directly on the request. We should use the smaller of the two and be
+	// able to renew; increment should be ignored as well.
+ {
+ req.ClientToken = root
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/create/test"
+ req.Data = map[string]interface{}{
+ "period": 150,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatal("response was nil")
+ }
+ if resp.Auth == nil {
+			t.Fatalf("response auth was nil, resp is %#v", *resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req.ClientToken = resp.Auth.ClientToken
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl := resp.Data["ttl"].(int64)
+ if ttl < 149 || ttl > 150 {
+ t.Fatalf("TTL bad (expected %d, got %d)", 149, ttl)
+ }
+
+ // Let the TTL go down a bit
+ time.Sleep(2 * time.Second)
+
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/renew-self"
+ req.Data = map[string]interface{}{
+ "increment": 1,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl = resp.Data["ttl"].(int64)
+ if ttl < 149 {
+ t.Fatalf("TTL bad (expected %d, got %d)", 149, ttl)
+ }
+ }
+
+ // Now do the same, also using an explicit max in the role
+	{
+		req.Path = "auth/token/roles/test"
+		req.ClientToken = root
+		req.Operation = logical.UpdateOperation
+		req.Data = map[string]interface{}{
+			"period":           300,
+			"explicit_max_ttl": 150,
+		}
+		resp, err = core.HandleRequest(req)
+		if err != nil || (resp != nil && resp.IsError()) {
+			t.Fatalf("err: %v resp: %v", err, resp)
+		}
+
+ req.ClientToken = root
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/create/test"
+ req.Data = map[string]interface{}{
+ "period": 150,
+ "explicit_max_ttl": 130,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatal("response was nil")
+ }
+ if resp.Auth == nil {
+			t.Fatalf("response auth was nil, resp is %#v", *resp)
+ }
+ if resp.Auth.ClientToken == "" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ req.ClientToken = resp.Auth.ClientToken
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl := resp.Data["ttl"].(int64)
+ if ttl < 129 || ttl > 130 {
+ t.Fatalf("TTL bad (expected %d, got %d)", 129, ttl)
+ }
+
+ // Let the TTL go down a bit
+ time.Sleep(4 * time.Second)
+
+ req.Operation = logical.UpdateOperation
+ req.Path = "auth/token/renew-self"
+ req.Data = map[string]interface{}{
+ "increment": 1,
+ }
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+ req.Operation = logical.ReadOperation
+ req.Path = "auth/token/lookup-self"
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ ttl = resp.Data["ttl"].(int64)
+ if ttl > 127 {
+ t.Fatalf("TTL bad (expected < %d, got %d)", 128, ttl)
+ }
+ }
+}
+
+func TestTokenStore_NoDefaultPolicy(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ core, ts, _, root := TestCoreWithTokenStore(t)
+ ps := core.policyStore
+	policy, err := Parse(tokenCreationPolicy)
+	if err != nil {
+		t.Fatal(err)
+	}
+ policy.Name = "policy1"
+ if err := ps.SetPolicy(policy); err != nil {
+ t.Fatal(err)
+ }
+
+ // Root token creates a token with desired policy. The created token
+ // should also have 'default' attached to it.
+ tokenData := map[string]interface{}{
+ "policies": []string{"policy1"},
+ }
+ tokenReq := &logical.Request{
+ Path: "create",
+ ClientToken: root,
+ Operation: logical.UpdateOperation,
+ Data: tokenData,
+ }
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"default", "policy1"}) {
+		t.Fatalf("bad: policies: expected: [default, policy1]; actual: %s", resp.Auth.Policies)
+ }
+
+ newToken := resp.Auth.ClientToken
+
+	// Root token creates a token with the desired policy, but also requests
+	// that the token not have the 'default' policy. The resulting token
+	// should not have the 'default' policy on it.
+ tokenData["no_default_policy"] = true
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
+ t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
+ }
+
+	// A non-root token which has the 'default' policy attached requests a
+	// child token. The child token should also have 'default' attached.
+ tokenReq.ClientToken = newToken
+ tokenReq.Data = nil
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"default", "policy1"}) {
+		t.Fatalf("bad: policies: expected: [default, policy1]; actual: %s", resp.Auth.Policies)
+ }
+
+	// A non-root token which has the 'default' policy attached requests a
+	// child token without 'default', while not sending a policy list
+ tokenReq.Data = map[string]interface{}{
+ "no_default_policy": true,
+ }
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
+ t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
+ }
+
+ // In this case "default" shouldn't exist because we are not inheriting
+ // parent policies
+ tokenReq.Data = map[string]interface{}{
+ "policies": []string{"policy1"},
+ "no_default_policy": true,
+ }
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
+ t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
+ }
+
+ // This is a non-root token which does not have 'default' policy
+ // attached
+ newToken = resp.Auth.ClientToken
+ tokenReq.Data = nil
+ tokenReq.ClientToken = newToken
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
+ t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
+ }
+
+ roleReq := &logical.Request{
+ ClientToken: root,
+ Path: "roles/role1",
+ Operation: logical.CreateOperation,
+ }
+ resp, err = ts.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+ tokenReq.Path = "create/role1"
+ tokenReq.Data = map[string]interface{}{
+ "policies": []string{"policy1"},
+ }
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
+ t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
+ }
+
+ // If 'allowed_policies' in role does not have 'default' in it, the
+ // tokens generated using that role should still have the 'default' policy
+ // attached to them.
+ roleReq.Operation = logical.UpdateOperation
+ roleReq.Data = map[string]interface{}{
+ "allowed_policies": "policy1",
+ }
+ resp, err = ts.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"default", "policy1"}) {
+		t.Fatalf("bad: policies: expected: [default, policy1]; actual: %s", resp.Auth.Policies)
+ }
+
+ // If 'allowed_policies' in role does not have 'default' in it, the
+ // tokens generated using that role should not have 'default' policy
+ // attached to them if disallowed_policies contains "default"
+ roleReq.Operation = logical.UpdateOperation
+ roleReq.Data = map[string]interface{}{
+ "allowed_policies": "policy1",
+ "disallowed_policies": "default",
+ }
+ resp, err = ts.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
+ t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
+ }
+
+ roleReq.Data = map[string]interface{}{
+ "allowed_policies": "",
+ "disallowed_policies": "default",
+ }
+ resp, err = ts.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+ if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
+ t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
+ }
+
+ // Ensure that if default is in both allowed and disallowed, disallowed wins
+ roleReq.Data = map[string]interface{}{
+ "allowed_policies": "default",
+ "disallowed_policies": "default",
+ }
+ resp, err = ts.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+
+ delete(tokenReq.Data, "policies")
+ resp, err = ts.HandleRequest(tokenReq)
+ if err == nil || (resp != nil && !resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+}
+
+func TestTokenStore_AllowedDisallowedPolicies(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+ roleReq := &logical.Request{
+ ClientToken: root,
+ Path: "roles/role1",
+ Operation: logical.CreateOperation,
+ Data: map[string]interface{}{
+ "allowed_policies": "allowed1,allowed2",
+ "disallowed_policies": "disallowed1,disallowed2",
+ },
+ }
+ resp, err = ts.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+
+ tokenReq := &logical.Request{
+ Path: "create/role1",
+ ClientToken: root,
+ Operation: logical.UpdateOperation,
+ Data: map[string]interface{}{
+ "policies": []string{"allowed1"},
+ },
+ }
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ expected := []string{"allowed1", "default"}
+ if !reflect.DeepEqual(resp.Auth.Policies, expected) {
+ t.Fatalf("bad: expected:%#v actual:%#v", expected, resp.Auth.Policies)
+ }
+
+ // Try again with automatic default adding turned off
+ tokenReq = &logical.Request{
+ Path: "create/role1",
+ ClientToken: root,
+ Operation: logical.UpdateOperation,
+ Data: map[string]interface{}{
+ "policies": []string{"allowed1"},
+ "no_default_policy": true,
+ },
+ }
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ expected = []string{"allowed1"}
+ if !reflect.DeepEqual(resp.Auth.Policies, expected) {
+ t.Fatalf("bad: expected:%#v actual:%#v", expected, resp.Auth.Policies)
+ }
+
+ tokenReq.Data = map[string]interface{}{
+ "policies": []string{"disallowed1"},
+ }
+ resp, err = ts.HandleRequest(tokenReq)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+
+ roleReq.Operation = logical.UpdateOperation
+ roleReq.Data = map[string]interface{}{
+ "allowed_policies": "allowed1,common",
+ "disallowed_policies": "disallowed1,common",
+ }
+ resp, err = ts.HandleRequest(roleReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err: %v, resp: %v", err, resp)
+ }
+
+ tokenReq.Data = map[string]interface{}{
+ "policies": []string{"allowed1", "common"},
+ }
+ resp, err = ts.HandleRequest(tokenReq)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+}
+
+// Issue 2189
+func TestTokenStore_RevokeUseCountToken(t *testing.T) {
+ var resp *logical.Response
+ var err error
+ cubbyFuncLock := &sync.RWMutex{}
+ cubbyFuncLock.Lock()
+
+ exp := mockExpiration(t)
+ ts := exp.tokenStore
+ root, _ := exp.tokenStore.rootToken()
+
+ tokenReq := &logical.Request{
+ Path: "create",
+ ClientToken: root.ID,
+ Operation: logical.UpdateOperation,
+ Data: map[string]interface{}{
+ "num_uses": 1,
+ },
+ }
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ tut := resp.Auth.ClientToken
+ saltTut := ts.SaltID(tut)
+ te, err := ts.lookupSalted(saltTut, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if te == nil {
+ t.Fatal("nil entry")
+ }
+ if te.NumUses != 1 {
+ t.Fatalf("bad: %d", te.NumUses)
+ }
+
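+	// Using the token's only allowed use should flag the entry with the
+	// tokenRevocationDeferred canary value rather than deleting it outright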
+ te, err = ts.UseToken(te)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if te == nil {
+ t.Fatal("nil entry")
+ }
+ if te.NumUses != tokenRevocationDeferred {
+ t.Fatalf("bad: %d", te.NumUses)
+ }
+
+ // Should return no entry because it's tainted
+ te, err = ts.lookupSalted(saltTut, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if te != nil {
+ t.Fatalf("%#v", te)
+ }
+
+ // But it should show up in an API lookup call
+ req := &logical.Request{
+ Path: "lookup-self",
+ ClientToken: tut,
+ Operation: logical.UpdateOperation,
+ }
+ resp, err = ts.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Data == nil || resp.Data["num_uses"] == nil {
+ t.Fatal("nil resp or data")
+ }
+ if resp.Data["num_uses"].(int) != -1 {
+ t.Fatalf("bad: %v", resp.Data["num_uses"])
+ }
+
+ // Should return tainted entries
+ te, err = ts.lookupSalted(saltTut, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if te == nil {
+ t.Fatal("nil entry")
+ }
+ if te.NumUses != tokenRevocationDeferred {
+ t.Fatalf("bad: %d", te.NumUses)
+ }
+
+ origDestroyCubbyhole := ts.cubbyholeDestroyer
+
+ ts.cubbyholeDestroyer = func(*TokenStore, string) error {
+ return fmt.Errorf("keep it frosty")
+ }
+
+ err = ts.revokeSalted(saltTut)
+ if err == nil {
+ t.Fatalf("expected err")
+ }
+
+ // Since revocation failed we should see the tokenRevocationFailed canary value
+ te, err = ts.lookupSalted(saltTut, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if te == nil {
+ t.Fatal("nil entry")
+ }
+ if te.NumUses != tokenRevocationFailed {
+ t.Fatalf("bad: %d", te.NumUses)
+ }
+
+	// Check the race condition by making the cubbyhole destroyer sleep
+ ts.cubbyholeDestroyer = func(*TokenStore, string) error {
+ time.Sleep(1 * time.Second)
+ return fmt.Errorf("keep it frosty")
+ }
+ cubbyFuncLock.Unlock()
+
+ go func() {
+ cubbyFuncLock.RLock()
+ err := ts.revokeSalted(saltTut)
+ cubbyFuncLock.RUnlock()
+		if err == nil {
+			// t.Fatal must not be called from a non-test goroutine
+			t.Error("expected error")
+		}
+ }()
+
+ // Give time for the function to start and grab locks
+ time.Sleep(200 * time.Millisecond)
+ te, err = ts.lookupSalted(saltTut, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if te == nil {
+ t.Fatal("nil entry")
+ }
+ if te.NumUses != tokenRevocationInProgress {
+ t.Fatalf("bad: %d", te.NumUses)
+ }
+
+ // Let things catch up
+ time.Sleep(2 * time.Second)
+
+ // Put back to normal
+ cubbyFuncLock.Lock()
+ defer cubbyFuncLock.Unlock()
+ ts.cubbyholeDestroyer = origDestroyCubbyhole
+
+ err = ts.revokeSalted(saltTut)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ te, err = ts.lookupSalted(saltTut, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if te != nil {
+ t.Fatal("found entry")
+ }
+}
+
+// Create a token, delete the token entry while leaking accessors, invoke
+// tidy, and check that the dangling accessor entry is removed
+func TestTokenStore_HandleTidyCase1(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ _, ts, _, root := TestCoreWithTokenStore(t)
+
+	// List the number of accessors. Since only the root token is present,
+	// the list operation should return only one key.
+ accessorListReq := &logical.Request{
+ Operation: logical.ListOperation,
+ Path: "accessors",
+ ClientToken: root,
+ }
+ resp, err = ts.HandleRequest(accessorListReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ numberOfAccessors := len(resp.Data["keys"].([]string))
+ if numberOfAccessors != 1 {
+ t.Fatalf("bad: number of accessors. Expected: 1, Actual: %d", numberOfAccessors)
+ }
+
+ for i := 1; i <= 100; i++ {
+ // Create a regular token
+ tokenReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "create",
+ ClientToken: root,
+ Data: map[string]interface{}{
+ "policies": []string{"policy1"},
+ },
+ }
+ resp, err = ts.HandleRequest(tokenReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+ tut := resp.Auth.ClientToken
+
+		// Creating another token should increment the number of accessors
+		// in the storage
+ resp, err = ts.HandleRequest(accessorListReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ numberOfAccessors = len(resp.Data["keys"].([]string))
+ if numberOfAccessors != i+1 {
+ t.Fatalf("bad: number of accessors. Expected: %d, Actual: %d", i+1, numberOfAccessors)
+ }
+
+		// Revoke the token while leaking other items associated with it. Do
+		// this by doing what revokeSalted used to do before it was fixed:
+		// delete the storage entry for the token and its cubbyhole, but leave
+		// its secondary index, its accessor, and any associated leases in
+		// place.
+
+ saltedTut := ts.SaltID(tut)
+ _, err = ts.lookupSalted(saltedTut, true)
+ if err != nil {
+ t.Fatalf("failed to lookup token: %v", err)
+ }
+
+ // Destroy the token index
+ path := lookupPrefix + saltedTut
+		if err := ts.view.Delete(path); err != nil {
+ t.Fatalf("failed to delete token entry: %v", err)
+ }
+
+ // Destroy the cubby space
+ err = ts.destroyCubbyhole(saltedTut)
+ if err != nil {
+ t.Fatalf("failed to destroyCubbyhole: %v", err)
+ }
+
+		// Leaking the accessor should have resulted in no change to the
+		// number of accessors
+ resp, err = ts.HandleRequest(accessorListReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ numberOfAccessors = len(resp.Data["keys"].([]string))
+ if numberOfAccessors != i+1 {
+ t.Fatalf("bad: number of accessors. Expected: %d, Actual: %d", i+1, numberOfAccessors)
+ }
+ }
+
+ tidyReq := &logical.Request{
+ Path: "tidy",
+ Operation: logical.UpdateOperation,
+ ClientToken: root,
+ }
+	resp, err = ts.HandleRequest(tidyReq)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%v resp:%v", err, resp)
+	}
+
+ // Tidy should have removed all the dangling accessor entries
+ resp, err = ts.HandleRequest(accessorListReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%v", err, resp)
+ }
+
+ numberOfAccessors = len(resp.Data["keys"].([]string))
+ if numberOfAccessors != 1 {
+ t.Fatalf("bad: number of accessors. Expected: 1, Actual: %d", numberOfAccessors)
+ }
+}
+
+func TestTokenStore_TidyLeaseRevocation(t *testing.T) {
+ exp := mockExpiration(t)
+ ts := exp.tokenStore
+
+ noop := &NoopBackend{}
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "logical/")
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+
+	// Create a new root token
+ root, err := ts.rootToken()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "create")
+ req.ClientToken = root.ID
+ req.Data["policies"] = []string{"default"}
+
+ resp, err := ts.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v %v", err, resp)
+ }
+
+	// Register the token with the expiration manager
+ auth := &logical.Auth{
+ ClientToken: resp.Auth.ClientToken,
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ Renewable: true,
+ },
+ }
+ err = exp.RegisterAuth("auth/token/create", auth)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ tut := resp.Auth.ClientToken
+
+ req = &logical.Request{
+ Path: "prod/aws/foo",
+ ClientToken: tut,
+ }
+ resp = &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: time.Hour,
+ },
+ },
+ }
+
+ leases := []string{}
+
+ for i := 0; i < 10; i++ {
+		leaseID, err := exp.Register(req, resp)
+		if err != nil {
+			t.Fatal(err)
+		}
+		leases = append(leases, leaseID)
+ }
+
+ sort.Strings(leases)
+
+ storedLeases, err := exp.lookupByToken(tut)
+ if err != nil {
+ t.Fatal(err)
+ }
+ sort.Strings(storedLeases)
+ if !reflect.DeepEqual(leases, storedLeases) {
+ t.Fatalf("bad: %#v vs %#v", leases, storedLeases)
+ }
+
+ // Now, delete the token entry. The leases should still exist.
+ saltedTut := ts.SaltID(tut)
+ te, err := ts.lookupSalted(saltedTut, true)
+ if err != nil {
+ t.Fatalf("failed to lookup token: %v", err)
+ }
+ if te == nil {
+ t.Fatal("got nil token entry")
+ }
+
+ // Destroy the token index
+ path := lookupPrefix + saltedTut
+	if err := ts.view.Delete(path); err != nil {
+ t.Fatalf("failed to delete token entry: %v", err)
+ }
+ te, err = ts.lookupSalted(saltedTut, true)
+ if err != nil {
+ t.Fatalf("failed to lookup token: %v", err)
+ }
+ if te != nil {
+ t.Fatal("got token entry")
+ }
+
+ // Verify leases still exist
+ storedLeases, err = exp.lookupByToken(tut)
+ if err != nil {
+ t.Fatal(err)
+ }
+ sort.Strings(storedLeases)
+ if !reflect.DeepEqual(leases, storedLeases) {
+ t.Fatalf("bad: %#v vs %#v", leases, storedLeases)
+ }
+
+ // Call tidy
+ ts.handleTidy(nil, nil)
+
+ // Verify leases are gone
+ storedLeases, err = exp.lookupByToken(tut)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(storedLeases) > 0 {
+ t.Fatal("found leases")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/util.go b/vendor/github.com/hashicorp/vault/vault/util.go
new file mode 100644
index 0000000..9e03afd
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/util.go
@@ -0,0 +1,47 @@
+package vault
+
+import (
+ "crypto/rand"
+ "fmt"
+)
+
+// memzero is used to zero out a byte buffer. This specific format is optimized
+// by the compiler to use memclr to improve performance. See this code review:
+// https://codereview.appspot.com/137880043
+//
+// Use of memzero is not a guarantee against memory analysis as described in
+// the Vault threat model:
+// https://www.vaultproject.io/docs/internals/security.html . Vault does not
+// provide guarantees against memory analysis or raw memory dumping by
+// operators, however it does minimize this exposure by zeroing out buffers
+// that contain secrets as soon as they are no longer used. Starting with Go
+// 1.5, the garbage collector was changed to become a "generational copying
+// garbage collector." This change to the garbage collector makes it
+// impossible for Vault to guarantee a buffer with a secret has not been
+// copied during a garbage collection. It is therefore possible that secrets
+// may be exist in memory that have not been wiped despite a pending memzero
+// call. Over time any copied data with a secret will be reused and the
+// memory overwritten thereby mitigating some of the risk from this threat
+// vector.
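+//
+// An illustrative use (a sketch, not taken from this package's callers):
+//
+//	key := []byte("s3cr3t")
+//	defer memzero(key)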
+func memzero(b []byte) {
+ if b == nil {
+ return
+ }
+ for i := range b {
+ b[i] = 0
+ }
+}
+
+// randbytes is used to create a buffer of size n filled with random bytes
+func randbytes(n int) []byte {
+ buf := make([]byte, n)
+ if _, err := rand.Read(buf); err != nil {
+ panic(fmt.Sprintf("failed to generate %d random bytes: %v", n, err))
+ }
+ return buf
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/util_test.go b/vendor/github.com/hashicorp/vault/vault/util_test.go
new file mode 100644
index 0000000..70fe1d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/util_test.go
@@ -0,0 +1,18 @@
+package vault
+
+import "testing"
+
+func TestMemZero(t *testing.T) {
+ b := []byte{1, 2, 3, 4}
+ memzero(b)
+ if b[0] != 0 || b[1] != 0 || b[2] != 0 || b[3] != 0 {
+ t.Fatalf("bad: %v", b)
+ }
+}
+
+func TestRandBytes(t *testing.T) {
+ b := randbytes(12)
+ if len(b) != 12 {
+ t.Fatalf("bad: %v", b)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/wrapping.go b/vendor/github.com/hashicorp/vault/vault/wrapping.go
new file mode 100644
index 0000000..46409c3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/wrapping.go
@@ -0,0 +1,321 @@
+package vault
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/SermoDigital/jose/crypto"
+ "github.com/SermoDigital/jose/jws"
+ "github.com/SermoDigital/jose/jwt"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+const (
+ // The location of the key used to generate response-wrapping JWTs
+ coreWrappingJWTKeyPath = "core/wrapping/jwtkey"
+)
+
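+// ensureWrappingKey loads the ECDSA P-521 private key used to sign
+// response-wrapping JWTs, generating and persisting it behind the barrier
+// on first use.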
+func (c *Core) ensureWrappingKey() error {
+ entry, err := c.barrier.Get(coreWrappingJWTKeyPath)
+ if err != nil {
+ return err
+ }
+
+ var keyParams clusterKeyParams
+
+ if entry == nil {
+ key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+ if err != nil {
+ return errwrap.Wrapf("failed to generate wrapping key: {{err}}", err)
+ }
+ keyParams.D = key.D
+ keyParams.X = key.X
+ keyParams.Y = key.Y
+ keyParams.Type = corePrivateKeyTypeP521
+ val, err := jsonutil.EncodeJSON(keyParams)
+ if err != nil {
+ return errwrap.Wrapf("failed to encode wrapping key: {{err}}", err)
+ }
+ entry = &Entry{
+ Key: coreWrappingJWTKeyPath,
+ Value: val,
+ }
+ if err = c.barrier.Put(entry); err != nil {
+ return errwrap.Wrapf("failed to store wrapping key: {{err}}", err)
+ }
+ }
+
+	// Redundant if we just created it, but in this case it serves as a check anyway
+ if err = jsonutil.DecodeJSON(entry.Value, &keyParams); err != nil {
+ return errwrap.Wrapf("failed to decode wrapping key parameters: {{err}}", err)
+ }
+
+ c.wrappingJWTKey = &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: elliptic.P521(),
+ X: keyParams.X,
+ Y: keyParams.Y,
+ },
+ D: keyParams.D,
+ }
+
+ c.logger.Info("core: loaded wrapping token key")
+
+ return nil
+}
+
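+// wrapInCubbyhole stores the given response in the cubbyhole of a new
+// single-use wrapping token and registers that token with the expiration
+// manager.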
+func (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*logical.Response, error) {
+ // Before wrapping, obey special rules for listing: if no entries are
+ // found, 404. This prevents unwrapping only to find empty data.
+ if req.Operation == logical.ListOperation {
+ if resp == nil || len(resp.Data) == 0 {
+ return nil, logical.ErrUnsupportedPath
+ }
+ keysRaw, ok := resp.Data["keys"]
+ if !ok || keysRaw == nil {
+ return nil, logical.ErrUnsupportedPath
+ }
+ keys, ok := keysRaw.([]string)
+ if !ok {
+ return nil, logical.ErrUnsupportedPath
+ }
+ if len(keys) == 0 {
+ return nil, logical.ErrUnsupportedPath
+ }
+ }
+
+ var err error
+
+	// If we are wrapping, the first part (performed in this function) happens
+	// before auditing so that resp.WrapInfo.Token can contain the HMAC'd
+	// wrapping token ID in the audit logs, making it possible to determine
+	// from the audit logs whether the token was ever actually used.
+ creationTime := time.Now()
+ te := TokenEntry{
+ Path: req.Path,
+ Policies: []string{"response-wrapping"},
+ CreationTime: creationTime.Unix(),
+ TTL: resp.WrapInfo.TTL,
+ NumUses: 1,
+ ExplicitMaxTTL: resp.WrapInfo.TTL,
+ }
+
+ if err := c.tokenStore.create(&te); err != nil {
+ c.logger.Error("core: failed to create wrapping token", "error", err)
+ return nil, ErrInternalError
+ }
+
+ resp.WrapInfo.Token = te.ID
+ resp.WrapInfo.CreationTime = creationTime
+
+ // This will only be non-nil if this response contains a token, so in that
+ // case put the accessor in the wrap info.
+ if resp.Auth != nil {
+ resp.WrapInfo.WrappedAccessor = resp.Auth.Accessor
+ }
+
+ switch resp.WrapInfo.Format {
+ case "jwt":
+ // Create the JWT
+ claims := jws.Claims{}
+	// Map the JWT ID to the token ID for ease of use
+ claims.SetJWTID(te.ID)
+ // Set the issue time to the creation time
+ claims.SetIssuedAt(creationTime)
+ // Set the expiration to the TTL
+ claims.SetExpiration(creationTime.Add(resp.WrapInfo.TTL))
+ if resp.Auth != nil {
+ claims.Set("accessor", resp.Auth.Accessor)
+ }
+ claims.Set("type", "wrapping")
+ claims.Set("addr", c.redirectAddr)
+ jwt := jws.NewJWT(claims, crypto.SigningMethodES512)
+ serWebToken, err := jwt.Serialize(c.wrappingJWTKey)
+ if err != nil {
+ c.logger.Error("core: failed to serialize JWT", "error", err)
+ return nil, ErrInternalError
+ }
+ resp.WrapInfo.Token = string(serWebToken)
+ }
+
+ cubbyReq := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "cubbyhole/response",
+ ClientToken: te.ID,
+ }
+
+ // During a rewrap, store the original response, don't wrap it again.
+ if req.Path == "sys/wrapping/rewrap" {
+ cubbyReq.Data = map[string]interface{}{
+ "response": resp.Data["response"],
+ }
+ } else {
+ httpResponse := logical.LogicalResponseToHTTPResponse(resp)
+
+ // Add the unique identifier of the original request to the response
+ httpResponse.RequestID = req.ID
+
+ // Because of the way that JSON encodes (likely just in Go) we actually get
+ // mixed-up values for ints if we simply put this object in the response
+ // and encode the whole thing; so instead we marshal it first, then store
+ // the string response. This actually ends up making it easier on the
+ // client side, too, as it becomes a straight read-string-pass-to-unmarshal
+ // operation.
+
+ marshaledResponse, err := json.Marshal(httpResponse)
+ if err != nil {
+ c.logger.Error("core: failed to marshal wrapped response", "error", err)
+ return nil, ErrInternalError
+ }
+
+ cubbyReq.Data = map[string]interface{}{
+ "response": string(marshaledResponse),
+ }
+ }
+
+ cubbyResp, err := c.router.Route(cubbyReq)
+ if err != nil {
+ // Revoke since it's not yet being tracked for expiration
+ c.tokenStore.Revoke(te.ID)
+ c.logger.Error("core: failed to store wrapped response information", "error", err)
+ return nil, ErrInternalError
+ }
+ if cubbyResp != nil && cubbyResp.IsError() {
+ c.tokenStore.Revoke(te.ID)
+ c.logger.Error("core: failed to store wrapped response information", "error", cubbyResp.Data["error"])
+ return cubbyResp, nil
+ }
+
+ // Store info for lookup
+ cubbyReq.Path = "cubbyhole/wrapinfo"
+ cubbyReq.Data = map[string]interface{}{
+ "creation_ttl": resp.WrapInfo.TTL,
+ "creation_time": creationTime,
+ }
+ cubbyResp, err = c.router.Route(cubbyReq)
+ if err != nil {
+ // Revoke since it's not yet being tracked for expiration
+ c.tokenStore.Revoke(te.ID)
+ c.logger.Error("core: failed to store wrapping information", "error", err)
+ return nil, ErrInternalError
+ }
+ if cubbyResp != nil && cubbyResp.IsError() {
+ c.tokenStore.Revoke(te.ID)
+ c.logger.Error("core: failed to store wrapping information", "error", cubbyResp.Data["error"])
+ return cubbyResp, nil
+ }
+
+ auth := &logical.Auth{
+ ClientToken: te.ID,
+ Policies: []string{"response-wrapping"},
+ LeaseOptions: logical.LeaseOptions{
+ TTL: te.TTL,
+ Renewable: false,
+ },
+ }
+
+ // Register the wrapped token with the expiration manager
+ if err := c.expiration.RegisterAuth(te.Path, auth); err != nil {
+ // Revoke since it's not yet being tracked for expiration
+ c.tokenStore.Revoke(te.ID)
+ c.logger.Error("core: failed to register cubbyhole wrapping token lease", "request_path", req.Path, "error", err)
+ return nil, ErrInternalError
+ }
+
+ return nil, nil
+}
+
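+// ValidateWrappingToken reports whether the token on the request (the client
+// token, or a "token" field in the body, possibly in JWT form) is a valid
+// response-wrapping token.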
+func (c *Core) ValidateWrappingToken(req *logical.Request) (bool, error) {
+ if req == nil {
+ return false, fmt.Errorf("invalid request")
+ }
+
+ var err error
+
+ var token string
+ var thirdParty bool
+ if req.Data != nil && req.Data["token"] != nil {
+ thirdParty = true
+ if tokenStr, ok := req.Data["token"].(string); !ok {
+ return false, fmt.Errorf("could not decode token in request body")
+ } else if tokenStr == "" {
+ return false, fmt.Errorf("empty token in request body")
+ } else {
+ token = tokenStr
+ }
+ } else {
+ token = req.ClientToken
+ }
+
+ // Check for it being a JWT. If it is, and it is valid, we extract the
+ // internal client token from it and use that during lookup.
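+	// A compact-serialized JWT is three base64url segments joined by two dots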
+ if strings.Count(token, ".") == 2 {
+ wt, err := jws.ParseJWT([]byte(token))
+ // If there's an error we simply fall back to attempting to use it as a regular token
+ if err == nil && wt != nil {
+ validator := &jwt.Validator{}
+ validator.SetClaim("type", "wrapping")
+ if err = wt.Validate(&c.wrappingJWTKey.PublicKey, crypto.SigningMethodES512, []*jwt.Validator{validator}...); err != nil {
+ return false, errwrap.Wrapf("wrapping token signature could not be validated: {{err}}", err)
+ }
+ token, _ = wt.Claims().JWTID()
+ // We override the given request client token so that the rest of
+ // Vault sees the real value. This also ensures audit logs are
+ // consistent with the actual token that was issued.
+ if !thirdParty {
+ req.ClientToken = token
+ } else {
+ req.Data["token"] = token
+ }
+ }
+ }
+
+ if token == "" {
+ return false, fmt.Errorf("token is empty")
+ }
+
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ if c.sealed {
+ return false, consts.ErrSealed
+ }
+ if c.standby {
+ return false, consts.ErrStandby
+ }
+
+ te, err := c.tokenStore.Lookup(token)
+ if err != nil {
+ return false, err
+ }
+ if te == nil {
+ return false, nil
+ }
+
+ if len(te.Policies) != 1 {
+ return false, nil
+ }
+
+ if te.Policies[0] != responseWrappingPolicyName {
+ return false, nil
+ }
+
+ return true, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/version/cgo.go b/vendor/github.com/hashicorp/vault/version/cgo.go
new file mode 100644
index 0000000..2ed493a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/version/cgo.go
@@ -0,0 +1,7 @@
+// +build cgo
+
+package version
+
+func init() {
+ CgoEnabled = true
+}
diff --git a/vendor/github.com/hashicorp/vault/version/version.go b/vendor/github.com/hashicorp/vault/version/version.go
new file mode 100644
index 0000000..0f81933
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/version/version.go
@@ -0,0 +1,92 @@
+package version
+
+import (
+ "bytes"
+ "fmt"
+)
+
+var (
+ // The git commit that was compiled. This will be filled in by the compiler.
+ GitCommit string
+ GitDescribe string
+
+ // Whether cgo is enabled or not; set at build time
+ CgoEnabled bool
+
+ Version = "unknown"
+ VersionPrerelease = "unknown"
+ VersionMetadata = ""
+)
+
+// VersionInfo holds the version fields reported for a build.
+type VersionInfo struct {
+ Revision string
+ Version string
+ VersionPrerelease string
+ VersionMetadata string
+}
+
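+// GetVersion assembles a VersionInfo from the package-level build variables.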
+func GetVersion() *VersionInfo {
+ ver := Version
+ rel := VersionPrerelease
+ md := VersionMetadata
+ if GitDescribe != "" {
+ ver = GitDescribe
+ }
+ if GitDescribe == "" && rel == "" && VersionPrerelease != "" {
+ rel = "dev"
+ }
+
+ return &VersionInfo{
+ Revision: GitCommit,
+ Version: ver,
+ VersionPrerelease: rel,
+ VersionMetadata: md,
+ }
+}
+
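+// VersionNumber returns the bare version string, for example
+// "0.7.0-rc1+ent" (illustrative; built from version, prerelease, metadata).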
+func (c *VersionInfo) VersionNumber() string {
+ if Version == "unknown" && VersionPrerelease == "unknown" {
+ return "(version unknown)"
+ }
+
+	version := c.Version
+
+ if c.VersionPrerelease != "" {
+ version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease)
+ }
+
+ if c.VersionMetadata != "" {
+ version = fmt.Sprintf("%s+%s", version, c.VersionMetadata)
+ }
+
+ return version
+}
+
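+// FullVersionNumber returns a human-readable string such as
+// "Vault v0.7.0-rc1 (<revision>)"; the revision is included when rev is true.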
+func (c *VersionInfo) FullVersionNumber(rev bool) string {
+ var versionString bytes.Buffer
+
+ if Version == "unknown" && VersionPrerelease == "unknown" {
+ return "Vault (version unknown)"
+ }
+
+ fmt.Fprintf(&versionString, "Vault v%s", c.Version)
+ if c.VersionPrerelease != "" {
+ fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease)
+ }
+
+ if c.VersionMetadata != "" {
+ fmt.Fprintf(&versionString, "+%s", c.VersionMetadata)
+ }
+
+ if rev && c.Revision != "" {
+ fmt.Fprintf(&versionString, " (%s)", c.Revision)
+ }
+
+ return versionString.String()
+}
diff --git a/vendor/github.com/hashicorp/vault/version/version_base.go b/vendor/github.com/hashicorp/vault/version/version_base.go
new file mode 100644
index 0000000..bd1d2ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/version/version_base.go
@@ -0,0 +1,13 @@
+// +build vault
+
+package version
+
+func init() {
+ // The main version number that is being run at the moment.
+ Version = "0.7.0"
+
+ // A pre-release marker for the version. If this is "" (empty string)
+ // then it means that it is a final release. Otherwise, this is a pre-release
+ // such as "dev" (in development), "beta", "rc1", etc.
+ VersionPrerelease = ""
+}
diff --git a/vendor/github.com/hashicorp/vault/website/Gemfile b/vendor/github.com/hashicorp/vault/website/Gemfile
new file mode 100644
index 0000000..405a8c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/Gemfile
@@ -0,0 +1,3 @@
+source "https://rubygems.org"
+
+gem "middleman-hashicorp", "0.3.22"
diff --git a/vendor/github.com/hashicorp/vault/website/Gemfile.lock b/vendor/github.com/hashicorp/vault/website/Gemfile.lock
new file mode 100644
index 0000000..229218a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/Gemfile.lock
@@ -0,0 +1,157 @@
+GEM
+ remote: https://rubygems.org/
+ specs:
+ activesupport (4.2.8)
+ i18n (~> 0.7)
+ minitest (~> 5.1)
+ thread_safe (~> 0.3, >= 0.3.4)
+ tzinfo (~> 1.1)
+ autoprefixer-rails (6.7.7.1)
+ execjs
+ bootstrap-sass (3.3.7)
+ autoprefixer-rails (>= 5.2.1)
+ sass (>= 3.3.4)
+ builder (3.2.3)
+ capybara (2.4.4)
+ mime-types (>= 1.16)
+ nokogiri (>= 1.3.3)
+ rack (>= 1.0.0)
+ rack-test (>= 0.5.4)
+ xpath (~> 2.0)
+ chunky_png (1.3.8)
+ coffee-script (2.4.1)
+ coffee-script-source
+ execjs
+ coffee-script-source (1.12.2)
+ compass (1.0.3)
+ chunky_png (~> 1.2)
+ compass-core (~> 1.0.2)
+ compass-import-once (~> 1.0.5)
+ rb-fsevent (>= 0.9.3)
+ rb-inotify (>= 0.9)
+ sass (>= 3.3.13, < 3.5)
+ compass-core (1.0.3)
+ multi_json (~> 1.0)
+ sass (>= 3.3.0, < 3.5)
+ compass-import-once (1.0.5)
+ sass (>= 3.2, < 3.5)
+ em-websocket (0.5.1)
+ eventmachine (>= 0.12.9)
+ http_parser.rb (~> 0.6.0)
+ erubis (2.7.0)
+ eventmachine (1.2.3)
+ execjs (2.7.0)
+ ffi (1.9.18)
+ haml (4.0.7)
+ tilt
+ hike (1.2.3)
+ hooks (0.4.1)
+ uber (~> 0.0.14)
+ http_parser.rb (0.6.0)
+ i18n (0.7.0)
+ json (2.0.3)
+ kramdown (1.13.2)
+ listen (3.0.8)
+ rb-fsevent (~> 0.9, >= 0.9.4)
+ rb-inotify (~> 0.9, >= 0.9.7)
+ middleman (3.4.1)
+ coffee-script (~> 2.2)
+ compass (>= 1.0.0, < 2.0.0)
+ compass-import-once (= 1.0.5)
+ execjs (~> 2.0)
+ haml (>= 4.0.5)
+ kramdown (~> 1.2)
+ middleman-core (= 3.4.1)
+ middleman-sprockets (>= 3.1.2)
+ sass (>= 3.4.0, < 4.0)
+ uglifier (~> 2.5)
+ middleman-core (3.4.1)
+ activesupport (~> 4.1)
+ bundler (~> 1.1)
+ capybara (~> 2.4.4)
+ erubis
+ hooks (~> 0.3)
+ i18n (~> 0.7.0)
+ listen (~> 3.0.3)
+ padrino-helpers (~> 0.12.3)
+ rack (>= 1.4.5, < 2.0)
+ thor (>= 0.15.2, < 2.0)
+ tilt (~> 1.4.1, < 2.0)
+ middleman-hashicorp (0.3.22)
+ bootstrap-sass (~> 3.3)
+ builder (~> 3.2)
+ middleman (~> 3.4)
+ middleman-livereload (~> 3.4)
+ middleman-syntax (~> 3.0)
+ redcarpet (~> 3.3)
+ turbolinks (~> 5.0)
+ middleman-livereload (3.4.6)
+ em-websocket (~> 0.5.1)
+ middleman-core (>= 3.3)
+ rack-livereload (~> 0.3.15)
+ middleman-sprockets (3.5.0)
+ middleman-core (>= 3.3)
+ sprockets (~> 2.12.1)
+ sprockets-helpers (~> 1.1.0)
+ sprockets-sass (~> 1.3.0)
+ middleman-syntax (3.0.0)
+ middleman-core (>= 3.2)
+ rouge (~> 2.0)
+ mime-types (3.1)
+ mime-types-data (~> 3.2015)
+ mime-types-data (3.2016.0521)
+ mini_portile2 (2.1.0)
+ minitest (5.10.1)
+ multi_json (1.12.1)
+ nokogiri (1.7.1)
+ mini_portile2 (~> 2.1.0)
+ padrino-helpers (0.12.8.1)
+ i18n (~> 0.6, >= 0.6.7)
+ padrino-support (= 0.12.8.1)
+ tilt (~> 1.4.1)
+ padrino-support (0.12.8.1)
+ activesupport (>= 3.1)
+ rack (1.6.5)
+ rack-livereload (0.3.16)
+ rack
+ rack-test (0.6.3)
+ rack (>= 1.0)
+ rb-fsevent (0.9.8)
+ rb-inotify (0.9.8)
+ ffi (>= 0.5.0)
+ redcarpet (3.4.0)
+ rouge (2.0.7)
+ sass (3.4.23)
+ sprockets (2.12.4)
+ hike (~> 1.2)
+ multi_json (~> 1.0)
+ rack (~> 1.0)
+ tilt (~> 1.1, != 1.3.0)
+ sprockets-helpers (1.1.0)
+ sprockets (~> 2.0)
+ sprockets-sass (1.3.1)
+ sprockets (~> 2.0)
+ tilt (~> 1.1)
+ thor (0.19.4)
+ thread_safe (0.3.6)
+ tilt (1.4.1)
+ turbolinks (5.0.1)
+ turbolinks-source (~> 5)
+ turbolinks-source (5.0.0)
+ tzinfo (1.2.3)
+ thread_safe (~> 0.1)
+ uber (0.0.15)
+ uglifier (2.7.2)
+ execjs (>= 0.3.0)
+ json (>= 1.8.0)
+ xpath (2.0.0)
+ nokogiri (~> 1.3)
+
+PLATFORMS
+ ruby
+
+DEPENDENCIES
+ middleman-hashicorp (= 0.3.22)
+
+BUNDLED WITH
+ 1.14.6
diff --git a/vendor/github.com/hashicorp/vault/website/LICENSE.md b/vendor/github.com/hashicorp/vault/website/LICENSE.md
new file mode 100644
index 0000000..3189f43
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/LICENSE.md
@@ -0,0 +1,10 @@
+# Proprietary License
+
+This license is temporary while a more official one is drafted. However,
+this should make it clear:
+
+The text contents of this website are MPL 2.0 licensed.
+
+The design contents of this website are proprietary and may not be reproduced
+or reused in any way other than to run the website locally. The license for
+the design is owned solely by HashiCorp, Inc.
diff --git a/vendor/github.com/hashicorp/vault/website/Makefile b/vendor/github.com/hashicorp/vault/website/Makefile
new file mode 100644
index 0000000..d7620d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/Makefile
@@ -0,0 +1,24 @@
+VERSION?="0.3.22"
+
+build:
+ @echo "==> Starting build in Docker..."
+ @docker run \
+ --interactive \
+ --rm \
+ --tty \
+ --volume "$(shell pwd):/website" \
+ hashicorp/middleman-hashicorp:${VERSION} \
+ bundle exec middleman build --verbose --clean
+
+website:
+ @echo "==> Starting website in Docker..."
+ @docker run \
+ --interactive \
+ --rm \
+ --tty \
+ --publish "4567:4567" \
+ --publish "35729:35729" \
+ --volume "$(shell pwd):/website" \
+ hashicorp/middleman-hashicorp:${VERSION}
+
+.PHONY: build website
diff --git a/vendor/github.com/hashicorp/vault/website/README.md b/vendor/github.com/hashicorp/vault/website/README.md
new file mode 100644
index 0000000..8a6f2cd
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/README.md
@@ -0,0 +1,21 @@
+# Vault Website
+
+This subdirectory contains the entire source for the [Vault Website][vault].
+This is a [Middleman][middleman] project, which builds a static site from these
+source files.
+
+## Contributions Welcome!
+
+If you find a typo or you feel like you can improve the HTML, CSS, or
+JavaScript, we welcome contributions. Feel free to open issues or pull requests
+like any normal GitHub project, and we'll merge it in.
+
+## Running the Site Locally
+
+Running the site locally is simple. Clone this repo and run `make website`.
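+
+A minimal sketch of those steps (assuming a fresh checkout of the
+`hashicorp/vault` repository):
+
+```shell
+$ git clone https://github.com/hashicorp/vault.git
+$ cd vault/website
+$ make website
+```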
+
+Then open `http://localhost:4567`. Note that for some URLs (in the navigation)
+you may need to append ".html" to make them work.
+
+[middleman]: https://www.middlemanapp.com
+[vault]: https://www.vaultproject.io
diff --git a/vendor/github.com/hashicorp/vault/website/config.rb b/vendor/github.com/hashicorp/vault/website/config.rb
new file mode 100644
index 0000000..1ca9c0c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/config.rb
@@ -0,0 +1,101 @@
+set :base_url, "https://www.vaultproject.io/"
+
+activate :hashicorp do |h|
+ h.name = "vault"
+ h.version = "0.7.0"
+ h.github_slug = "hashicorp/vault"
+ h.website_root = "website"
+end
+
+helpers do
+ # Returns the FQDN of the image URL.
+ #
+ # @param [String] path
+ #
+ # @return [String]
+ def image_url(path)
+ File.join(base_url, image_path(path))
+ end
+
+ # Get the title for the page.
+ #
+ # @param [Middleman::Page] page
+ #
+ # @return [String]
+ def title_for(page)
+ if page && page.data.page_title
+ return "#{page.data.page_title} - Vault by HashiCorp"
+ end
+
+ "Vault by HashiCorp"
+ end
+
+ # Get the description for the page
+ #
+ # @param [Middleman::Page] page
+ #
+ # @return [String]
+ def description_for(page)
+ description = (page.data.description || "")
+ .gsub('"', '')
+ .gsub(/\n+/, ' ')
+ .squeeze(' ')
+
+ return escape_html(description)
+ end
+
+ # This helps by setting the "active" class for sidebar nav elements
+ # if the YAML frontmatter matches the expected value.
+ def sidebar_current(expected)
+ current = current_page.data.sidebar_current || ""
+ if current.start_with?(expected)
+ return " class=\"active\""
+ else
+ return ""
+ end
+ end
+
+ # Returns the id for this page.
+ # @return [String]
+ def body_id_for(page)
+ if !(name = page.data.sidebar_current).blank?
+ return "page-#{name.strip}"
+ end
+ if page.url == "/" || page.url == "/index.html"
+ return "page-home"
+ end
+ if !(title = page.data.page_title).blank?
+ return title
+ .downcase
+ .gsub('"', '')
+ .gsub(/[^\w]+/, '-')
+ .gsub(/_+/, '-')
+ .squeeze('-')
+ .squeeze(' ')
+ end
+ return ""
+ end
+
+ # Returns the list of classes for this page.
+ # @return [String]
+ def body_classes_for(page)
+ classes = []
+
+ if !(layout = page.data.layout).blank?
+ classes << "layout-#{page.data.layout}"
+ end
+
+ if !(title = page.data.page_title).blank?
+ title = title
+ .downcase
+ .gsub('"', '')
+ .gsub(/[^\w]+/, '-')
+ .gsub(/_+/, '-')
+ .squeeze('-')
+ .squeeze(' ')
+ classes << "page-#{title}"
+ end
+
+ return classes.join(" ")
+ end
+end
diff --git a/vendor/github.com/hashicorp/vault/website/packer.json b/vendor/github.com/hashicorp/vault/website/packer.json
new file mode 100644
index 0000000..35de632
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/packer.json
@@ -0,0 +1,37 @@
+{
+ "variables": {
+ "aws_access_key_id": "{{ env `AWS_ACCESS_KEY_ID` }}",
+ "aws_secret_access_key": "{{ env `AWS_SECRET_ACCESS_KEY` }}",
+ "aws_region": "{{ env `AWS_REGION` }}",
+ "fastly_api_key": "{{ env `FASTLY_API_KEY` }}"
+ },
+ "builders": [
+ {
+ "type": "docker",
+ "image": "hashicorp/middleman-hashicorp:0.3.22",
+ "discard": "true",
+ "run_command": ["-d", "-i", "-t", "{{ .Image }}", "/bin/sh"]
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "file",
+ "source": ".",
+ "destination": "/website"
+ },
+ {
+ "type": "shell",
+ "environment_vars": [
+ "AWS_ACCESS_KEY_ID={{ user `aws_access_key_id` }}",
+ "AWS_SECRET_ACCESS_KEY={{ user `aws_secret_access_key` }}",
+ "AWS_REGION={{ user `aws_region` }}",
+ "FASTLY_API_KEY={{ user `fastly_api_key` }}"
+ ],
+ "inline": [
+ "bundle check || bundle install",
+ "bundle exec middleman build",
+ "/bin/sh ./scripts/deploy.sh"
+ ]
+ }
+ ]
+}
diff --git a/vendor/github.com/hashicorp/vault/website/scripts/deploy.sh b/vendor/github.com/hashicorp/vault/website/scripts/deploy.sh
new file mode 100755
index 0000000..383ad8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/scripts/deploy.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+set -e
+
+PROJECT="vault"
+PROJECT_URL="www.vaultproject.io"
+FASTLY_SERVICE_ID="7GrxRJP3PVBuqQbyxYQ0MV"
+
+# Ensure the proper AWS environment variables are set
+if [ -z "$AWS_ACCESS_KEY_ID" ]; then
+ echo "Missing AWS_ACCESS_KEY_ID!"
+ exit 1
+fi
+
+if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
+ echo "Missing AWS_SECRET_ACCESS_KEY!"
+ exit 1
+fi
+
+# Ensure the proper Fastly keys are set
+if [ -z "$FASTLY_API_KEY" ]; then
+ echo "Missing FASTLY_API_KEY!"
+ exit 1
+fi
+
+# Ensure we have s3cmd installed
+if ! command -v "s3cmd" >/dev/null 2>&1; then
+ echo "Missing s3cmd!"
+ exit 1
+fi
+
+# Get the parent directory of where this script is and cd there
+DIR="$(cd "$(dirname "$(readlink -f "$0")")/.." && pwd)"
+
+# Delete any .DS_Store files for our OS X friends.
+find "$DIR" -type f -name '.DS_Store' -delete
+
+# Upload the files to S3 - we disable mime-type detection by the python library
+# and just guess from the file extension because it's surprisingly more
+# accurate, especially for CSS and JavaScript. We also tag the uploaded files
+# with the proper Surrogate-Key, which we will later purge in our API call to
+# Fastly.
+if [ -z "$NO_UPLOAD" ]; then
+ echo "Uploading to S3..."
+
+ # Check that the site has been built
+ if [ ! -d "$DIR/build" ]; then
+ echo "Missing compiled website! Run 'make build' to compile!"
+ exit 1
+ fi
+
+ # Set browser-side cache-control to ~4h, but tell Fastly to cache for much
+ # longer. We manually purge the Fastly cache, so setting it to a year is more
+ # than fine.
+ s3cmd \
+ --quiet \
+ --delete-removed \
+ --guess-mime-type \
+ --no-mime-magic \
+ --acl-public \
+ --recursive \
+ --add-header="Cache-Control: max-age=14400" \
+    --add-header="x-amz-meta-surrogate-control: max-age=31536000, stale-while-revalidate=86400, stale-if-error=604800" \
+ --add-header="x-amz-meta-surrogate-key: site-$PROJECT" \
+ sync "$DIR/build/" "s3://hc-sites/$PROJECT/latest/"
+
+ # The s3cmd guessed mime type for text files is often wrong. This is
+ # problematic for some assets, so force their mime types to be correct.
+ echo "Overriding javascript mime-types..."
+ s3cmd \
+ --mime-type="application/javascript" \
+ --add-header="Cache-Control: max-age=31536000" \
+ --exclude "*" \
+ --include "*.js" \
+ --recursive \
+ modify "s3://hc-sites/$PROJECT/latest/"
+
+ echo "Overriding css mime-types..."
+ s3cmd \
+ --mime-type="text/css" \
+ --add-header="Cache-Control: max-age=31536000" \
+ --exclude "*" \
+ --include "*.css" \
+ --recursive \
+ modify "s3://hc-sites/$PROJECT/latest/"
+
+ echo "Overriding svg mime-types..."
+ s3cmd \
+ --mime-type="image/svg+xml" \
+ --add-header="Cache-Control: max-age=31536000" \
+ --exclude "*" \
+ --include "*.svg" \
+ --recursive \
+ modify "s3://hc-sites/$PROJECT/latest/"
+fi
+
+# Perform a purge of the surrogate key.
+if [ -z "$NO_PURGE" ]; then
+ echo "Purging Fastly cache..."
+ curl \
+ --fail \
+ --silent \
+ --output /dev/null \
+ --request "POST" \
+ --header "Accept: application/json" \
+ --header "Fastly-Key: $FASTLY_API_KEY" \
+ --header "Fastly-Soft-Purge: 1" \
+ "https://api.fastly.com/service/$FASTLY_SERVICE_ID/purge/site-$PROJECT"
+fi
+
+# Warm the cache with recursive wget.
+if [ -z "$NO_WARM" ]; then
+ echo "Warming Fastly cache..."
+ echo ""
+ echo "If this step fails, there are likely missing or broken assets or links"
+ echo "on the website. Run the following command manually on your laptop, and"
+ echo "search for \"ERROR\" in the output:"
+ echo ""
+ echo "wget --recursive --delete-after https://$PROJECT_URL/"
+ echo ""
+ wget \
+ --recursive \
+ --delete-after \
+ --quiet \
+ "https://$PROJECT_URL/"
+fi
diff --git a/vendor/github.com/hashicorp/vault/website/source/.gitignore b/vendor/github.com/hashicorp/vault/website/source/.gitignore
new file mode 100644
index 0000000..67550a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/.gitignore
@@ -0,0 +1,2 @@
+# Source folder
+node_modules/
diff --git a/vendor/github.com/hashicorp/vault/website/source/404.html.md b/vendor/github.com/hashicorp/vault/website/source/404.html.md
new file mode 100644
index 0000000..9843b00
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/404.html.md
@@ -0,0 +1,14 @@
+---
+layout: "inner"
+page_title: "Not Found"
+noindex: true
+description: |-
+ Page not found!
+---
+
+# Page Not Found
+
+Sorry, the page you tried to visit does not exist. This could be our fault,
+and if so we will fix that up right away.
+
+Please go back to get back on track.
diff --git a/vendor/github.com/hashicorp/vault/website/source/_ember_steps.html.erb b/vendor/github.com/hashicorp/vault/website/source/_ember_steps.html.erb
new file mode 100644
index 0000000..c28a008
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/_ember_steps.html.erb
@@ -0,0 +1,146 @@
+<!-- Ember step templates for the interactive Vault demo -->
diff --git a/vendor/github.com/hashicorp/vault/website/source/_ember_templates.html.erb b/vendor/github.com/hashicorp/vault/website/source/_ember_templates.html.erb
new file mode 100644
index 0000000..23184ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/_ember_templates.html.erb
@@ -0,0 +1,32 @@
+<!-- Ember Handlebars templates for the interactive Vault demo -->
diff --git a/vendor/github.com/hashicorp/vault/website/source/android-manifest.json.erb b/vendor/github.com/hashicorp/vault/website/source/android-manifest.json.erb
new file mode 100644
index 0000000..be0a4bb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/android-manifest.json.erb
@@ -0,0 +1,18 @@
+{
+ "name": "Vault",
+ "icons": [
+ {
+ "src": "<%= image_path('favicons/android-chrome-192x192.png') %>",
+ "sizes": "192x192",
+ "type": "image/png"
+ },
+ {
+ "src": "<%= image_path('favicons/android-chrome-512x512.png') %>",
+ "sizes": "512x512",
+ "type": "image/png"
+ }
+ ],
+ "theme_color": "#ffffff",
+ "background_color": "#ffffff",
+ "display": "standalone"
+}
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/index.html.md
new file mode 100644
index 0000000..719b471
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/index.html.md
@@ -0,0 +1,174 @@
+---
+layout: "api"
+page_title: "HTTP API"
+sidebar_current: "docs-http-overview"
+description: |-
+ Vault has an HTTP API that can be used to control every aspect of Vault.
+---
+
+# HTTP API
+
+The Vault HTTP API gives you full access to Vault via HTTP. Every
+aspect of Vault can be controlled via this API. The Vault CLI uses
+the HTTP API to access Vault.
+
+## Version Prefix
+
+All API routes are prefixed with `/v1/`.
+
+This documentation is only for the v1 API.
+
+~> **Backwards compatibility:** At the current version, Vault does
+not yet promise backwards compatibility even with the v1 prefix. We'll
+remove this warning when this policy changes. We expect we'll reach API
+stability by Vault 1.0.
+
+## Transport
+
+The API is expected to be accessed over a TLS connection at all times, with a
+valid certificate that is verified by a well-behaved client. It is possible to
+disable TLS verification for listeners, however, so API clients should be
+prepared to handle both verified and unverified connections, depending on user
+settings.
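+
+As a sketch, a client can verify the listener's certificate against a specific
+CA from the command line; the hostname and CA file shown here are placeholders:
+
+```shell
+$ curl \
+    --cacert vault-ca.pem \
+    https://vault.example.com:8200/v1/sys/health
+```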
+
+## Authentication
+
+Once Vault is unsealed, every other operation requires a _client token_. A
+user may be given a client token directly. The client token must be sent as
+the `X-Vault-Token` HTTP header.
+
+Otherwise, a client token can be retrieved via [authentication
+backends](/docs/auth/index.html).
+
+Each authentication backend will have one or more unauthenticated login
+endpoints. These endpoints can be reached without any authentication, and are
+used for authentication itself. These endpoints are specific to each
+authentication backend.
+
+Login endpoints for authentication backends that generate an identity send the
+resulting token down via JSON. This token should be saved on the client or
+passed via the `X-Vault-Token` header for future requests.
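+
+As a sketch, assuming the `userpass` authentication backend is enabled and a
+user named `alice` exists, a login request looks like this; the client token is
+returned in the `auth.client_token` field of the JSON response:
+
+```shell
+$ curl \
+    --request POST \
+    --data '{"password": "..."}' \
+    http://127.0.0.1:8200/v1/auth/userpass/login/alice
+```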
+
+## Reading, Writing, and Listing Secrets
+
+Different backends implement different APIs according to their functionality.
+The examples below are created with the `generic` backend, which acts like a
+Key/Value store. Read the documentation for a particular backend for detailed
+information on its API; this simply provides a general overview.
+
+Reading a secret via the HTTP API is done by issuing a GET using the
+following URL:
+
+```text
+/v1/secret/foo
+```
+
+This maps to `secret/foo` where `foo` is the key in the `secret/` mount, which
+is mounted by default on a fresh Vault install and is of type `generic`.
+
+Here is an example of reading a secret using cURL:
+
+```shell
+$ curl \
+ -H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
+ -X GET \
+ http://127.0.0.1:8200/v1/secret/foo
+```
+
+You can also list secrets. To do this, either issue a GET with the query
+parameter `list=true` or use the LIST HTTP verb. For the `generic` backend,
+listing is allowed on directories only, and returns the keys in the given
+directory:
+
+```shell
+$ curl \
+ -H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
+ -X GET \
+ http://127.0.0.1:8200/v1/secret/?list=true
+```
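+
+Equivalently, the same listing can be requested with the LIST HTTP verb:
+
+```shell
+$ curl \
+    -H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
+    -X LIST \
+    http://127.0.0.1:8200/v1/secret/
+```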
+
+To write a secret, issue a POST on the following URL:
+
+```text
+/v1/secret/foo
+```
+
+with a JSON body like:
+
+```javascript
+{
+ "value": "bar"
+}
+```
+
+Here is an example of writing a secret using cURL:
+
+```shell
+$ curl \
+ -H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
+ -H "Content-Type: application/json" \
+ -X POST \
+ -d '{"value":"bar"}' \
+ http://127.0.0.1:8200/v1/secret/baz
+```
+
+Vault currently considers PUT and POST to be synonyms. Rather than trust a
+client's stated intentions, Vault backends can implement an existence check to
+discover whether an operation is actually a create or update operation based on
+the data already stored within Vault.
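+
+Because PUT and POST are synonyms, the write above could equally be issued as:
+
+```shell
+$ curl \
+    -H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
+    -H "Content-Type: application/json" \
+    -X PUT \
+    -d '{"value":"bar"}' \
+    http://127.0.0.1:8200/v1/secret/baz
+```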
+
+For more examples, please look at the Vault API client.
+
+## Help
+
+To retrieve the help for any API within Vault, including mounted
+backends, credential providers, etc., append `?help=1` to the URL. If you
+have valid permission to access the path, the help text will be returned
+with the following structure:
+
+```javascript
+{
+ "help": "help text"
+}
+```
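+
+For example, to read the help text for a path in the `secret/` mount (a sketch
+using the token from the examples above):
+
+```shell
+$ curl \
+    -H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
+    "http://127.0.0.1:8200/v1/secret/foo?help=1"
+```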
+
+## Error Response
+
+Errors are always returned in a common JSON structure:
+
+```javascript
+{
+ "errors": [
+ "message",
+ "another message"
+ ]
+}
+```
+
+This structure will be sent down for any HTTP status greater than
+or equal to 400.
+
+## HTTP Status Codes
+
+The following HTTP status codes are used throughout the API.
+
+- `200` - Success with data.
+- `204` - Success, no data returned.
+- `400` - Invalid request, missing or invalid data. See the
+ "validation" section for more details on the error response.
+- `403` - Forbidden, your authentication details are either
+ incorrect or you don't have access to this feature.
+- `404` - Invalid path. This can mean either that the path truly
+  doesn't exist or that you don't have permission to view a
+  specific path. We use 404 in some cases to avoid state leakage.
+- `429` - Default return code for health status of standby nodes, indicating a
+ warning.
+- `500` - Internal server error. An internal error has occurred,
+ try again later. If the error persists, report a bug.
+- `503` - Vault is down for maintenance or is currently sealed.
+ Try again later.
+
+## Limits
+
+A maximum request size of 32MB is imposed to prevent a denial
+of service attack with arbitrarily large requests.
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/libraries.html.md b/vendor/github.com/hashicorp/vault/website/source/api/libraries.html.md
new file mode 100644
index 0000000..3e35be5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/libraries.html.md
@@ -0,0 +1,135 @@
+---
+layout: "api"
+page_title: "HTTP API: Libraries"
+sidebar_current: "docs-http-libraries"
+description: |-
+ List of official and community contributed libraries for interacting with the Vault HTTP API.
+---
+
+# Libraries
+
+The programming libraries listed on this page can be used to consume the API more conveniently.
+Some are officially maintained while others are provided by the community.
+
+## Official
+
+These libraries are officially maintained by HashiCorp.
+
+### Go
+
+* [Vault Go Client](https://github.com/hashicorp/vault/tree/master/api)
+
+```shell
+$ go get github.com/hashicorp/vault/api
+```
+
+### Ruby
+
+* [Vault Ruby Client](https://github.com/hashicorp/vault-ruby)
+
+```shell
+$ gem install vault
+```
+
+## Community
+
+These libraries are provided by the community.
+
+### Ansible
+
+* [Ansible Modules Hashivault](https://pypi.python.org/pypi/ansible-modules-hashivault)
+
+```shell
+$ pip install ansible-modules-hashivault
+```
+
+### C#
+
+* [VaultSharp](https://github.com/rajanadar/VaultSharp) (.NET Standard = 1.4 (.NET Core >= 1.0.0) and also .NET 4.5.x, .NET 4.6.x)
+
+```shell
+$ Install-Package VaultSharp
+```
+
+* [Vault.NET](https://github.com/Chatham/Vault.NET)
+
+```shell
+$ Install-Package Vault
+```
+
+### Clojure
+
+* [vault-clj](https://github.com/amperity/vault-clj)
+
+### Elixir
+
+* [vaultex](https://hex.pm/packages/vaultex)
+
+### Go
+
+* [vc](https://github.com/adfinis-sygroup/vault-client)
+
+```shell
+$ go get github.com/adfinis-sygroup/vault-client
+```
+
+### Haskell
+
+* [vault-tool](https://hackage.haskell.org/package/vault-tool)
+
+```shell
+$ cabal install vault-tool
+```
+
+### Java
+
+* [Spring Vault](https://github.com/spring-projects/spring-vault)
+* [vault-java](https://github.com/jhaals/vault-java)
+* [vault-java-driver](https://github.com/BetterCloud/vault-java-driver)
+
+### Kotlin
+
+* [vault-kotlin](https://github.com/kunickiaj/vault-kotlin)
+
+### Node.js
+
+* [node-vault](https://github.com/kr1sp1n/node-vault)
+
+```shell
+$ npm install node-vault
+```
+
+* [vaulted](https://github.com/chiefy/vaulted)
+
+```shell
+$ npm install vaulted
+```
+
+### PHP
+
+* [vault-php-sdk](https://github.com/jippi/vault-php-sdk)
+
+```shell
+$ composer require jippi/vault-php-sdk
+```
+
+* [vault-php-sdk](https://github.com/violuke/vault-php-sdk) extended from jippi
+
+```shell
+$ composer require violuke/vault-php-sdk
+```
+
+### Python
+
+* [HVAC](https://github.com/ianunruh/hvac)
+
+```shell
+$ pip install hvac
+```
+
+### Rust
+
+* [HashicorpVault](https://crates.io/crates/hashicorp_vault)
+
+### Scala
+
+* [scala-vault](https://github.com/janstenpickle/scala-vault)
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/aws/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/aws/index.html.md
new file mode 100644
index 0000000..25dc268
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/aws/index.html.md
@@ -0,0 +1,363 @@
+---
+layout: "api"
+page_title: "AWS Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-aws"
+description: |-
+ This is the API documentation for the Vault AWS secret backend.
+---
+
+# AWS Secret Backend HTTP API
+
+This is the API documentation for the Vault AWS secret backend. For general
+information about the usage and operation of the AWS backend, please see the
+[Vault AWS backend documentation](/docs/secrets/aws/index.html).
+
+This documentation assumes the AWS backend is mounted at the `/aws` path in
+Vault. Since it is possible to mount secret backends at any location, please
+update your API calls accordingly.
+
+## Configure Root IAM Credentials
+
+This endpoint configures the root IAM credentials to communicate with AWS. There
+are multiple ways to pass root IAM credentials to the Vault server, specified
+below with the highest precedence first. If credentials already exist, this will
+overwrite them.
+
+- Static credentials provided to the API as a payload
+
+- Credentials in the `AWS_ACCESS_KEY`, `AWS_SECRET_KEY`, and `AWS_REGION`
+ environment variables **on the server**
+
+- Querying the EC2 metadata service if the **Vault server** is on EC2 and has
+ querying capabilities
+
+At present, this endpoint does not confirm that the provided AWS credentials are
+valid AWS credentials with proper permissions.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/aws/config/root` | `204 (empty body)` |
+
+### Parameters
+
+- `access_key` `(string: <required>)` – Specifies the AWS access key ID.
+
+- `secret_key` `(string: <required>)` – Specifies the AWS secret access key.
+
+- `region` `(string: <required>)` – Specifies the AWS region.
+
+### Sample Payload
+
+```json
+{
+ "access_key": "AKIA...",
+ "secret_key": "2J+...",
+ "region": "us-east-1"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/aws/config/root
+```
+
+## Configure Lease
+
+This endpoint configures lease settings for the AWS secret backend. It is
+optional, as there are default values for `lease` and `lease_max`.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/aws/config/lease` | `204 (empty body)` |
+
+### Parameters
+
+- `lease` `(string: <required>)` – Specifies the lease value provided as a
+  string duration with time suffix. "h" (hour) is the largest suffix.
+
+- `lease_max` `(string: <required>)` – Specifies the maximum lease value
+  provided as a string duration with time suffix. "h" (hour) is the largest
+  suffix.
+
+### Sample Payload
+
+```json
+{
+ "lease": "30m",
+ "lease_max": "12h"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/aws/config/lease
+```
+
+## Read Lease
+
+This endpoint returns the current lease settings for the AWS secret backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/aws/config/lease` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/aws/config/lease
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "lease": "30m0s",
+ "lease_max": "12h0m0s"
+ }
+}
+```
+
+## Create/Update Role
+
+This endpoint creates or updates the role with the given `name`. If a role with
+the name does not exist, it will be created. If the role exists, it will be
+updated with the new attributes.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/aws/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to create. This
+  is part of the request URL.
+
+- `policy` `(string: <required unless arn provided>)` – Specifies the IAM policy
+  in JSON format.
+
+- `arn` `(string: <required unless policy provided>)` – Specifies the full ARN
+  reference to the desired existing policy.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/aws/roles/example-role
+```
+
+### Sample Payloads
+
+Using an inline IAM policy:
+
+```json
+{
+  "policy": "{\"Version\": \"...\"}"
+}
+```
+
+Using an ARN:
+
+```json
+{
+ "arn": "arn:aws:iam::123456789012:user/David"
+}
+```
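+
+As a sketch, a complete inline policy document might look like this; the
+statement shown is only an example, not a recommendation:
+
+```json
+{
+  "policy": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Action\":\"ec2:Describe*\",\"Resource\":\"*\"}]}"
+}
+```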
+
+## Read Role
+
+This endpoint queries an existing role by the given name. If the role does not
+exist, a 404 is returned.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/aws/roles/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to read. This
+ is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/aws/roles/example-role
+```
+
+### Sample Responses
+
+For an inline IAM policy:
+
+```json
+{
+ "data": {
+ "policy": "{\"Version\": \"...\"}"
+ }
+}
+```
+
+For an ARN:
+
+```json
+{
+ "data": {
+ "arn": "arn:aws:iam::123456789012:user/David"
+ }
+}
+```
+
+## List Roles
+
+This endpoint lists all existing roles in the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/aws/roles` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/aws/roles
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "keys": [
+ "example-role"
+ ]
+ }
+}
+```
+
+## Delete Role
+
+This endpoint deletes an existing role by the given name. If the role does not
+exist, a 404 is returned.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/aws/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to delete. This
+ is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/aws/roles/example-role
+```
+
+## Generate IAM Credentials
+
+This endpoint generates dynamic IAM credentials based on the named role. The
+role must exist before credentials can be generated against it.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/aws/creds/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to generate
+  credentials against. This is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/aws/creds/example-role
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "access_key": "AKIA...",
+ "secret_key": "xlCs...",
+ "security_token": null
+ }
+}
+```
+
+## Generate IAM with STS
+
+This generates a dynamic IAM credential with an STS token based on the named
+role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/aws/sts/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role against which
+ to create this STS credential. This is part of the request URL.
+
+- `ttl` `(string: "3600s")` – Specifies the TTL for the use of the STS token.
+ This is specified as a string with a duration suffix. AWS documentation
+ excerpt: `The duration, in seconds, that the credentials should remain valid.
+ Acceptable durations for IAM user sessions range from 900 seconds (15
+ minutes) to 129600 seconds (36 hours), with 43200 seconds (12 hours) as the
+ default. Sessions for AWS account owners are restricted to a maximum of 3600
+ seconds (one hour). If the duration is longer than one hour, the session for
+ AWS account owners defaults to one hour.`
+
+### Sample Payload
+
+```json
+{
+ "ttl": "15m"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/aws/sts/example-role
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "access_key": "AKIA...",
+ "secret_key": "xlCs...",
+ "security_token": "429255"
+ }
+}
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/cassandra/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/cassandra/index.html.md
new file mode 100644
index 0000000..9f9471c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/cassandra/index.html.md
@@ -0,0 +1,239 @@
+---
+layout: "api"
+page_title: "Cassandra Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-cassandra"
+description: |-
+ This is the API documentation for the Vault Cassandra secret backend.
+---
+
+# Cassandra Secret Backend HTTP API
+
+This is the API documentation for the Vault Cassandra secret backend. For
+general information about the usage and operation of the Cassandra backend,
+please see the
+[Vault Cassandra backend documentation](/docs/secrets/cassandra/index.html).
+
+This documentation assumes the Cassandra backend is mounted at the `/cassandra`
+path in Vault. Since it is possible to mount secret backends at any location,
+please update your API calls accordingly.
+
+## Configure Connection
+
+This endpoint configures the connection information used to communicate with
+Cassandra.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/cassandra/config/connection` | `204 (empty body)` |
+
+### Parameters
+
+- `hosts` `(string: <required>)` – Specifies a set of comma-delimited Cassandra
+  hosts to connect to.
+
+- `username` `(string: <required>)` – Specifies the username to use for
+  superuser access.
+
+- `password` `(string: <required>)` – Specifies the password corresponding to
+  the given username.
+
+- `tls` `(bool: true)` – Specifies whether to use TLS when connecting to
+ Cassandra.
+
+- `insecure_tls` `(bool: false)` – Specifies whether to skip verification of the
+ server certificate when using TLS.
+
+- `pem_bundle` `(string: "")` – Specifies concatenated PEM blocks containing a
+ certificate and private key; a certificate, private key, and issuing CA
+ certificate; or just a CA certificate.
+
+- `pem_json` `(string: "")` – Specifies JSON containing a certificate and
+  private key; a certificate, private key, and issuing CA certificate; or just a
+  CA certificate. For convenience, the format is the same as the output of the
+  `issue` command from the `pki` backend; see
+  [the pki documentation](/docs/secrets/pki/index.html).
+
+- `protocol_version` `(int: 2)` – Specifies the CQL protocol version to use.
+
+- `connect_timeout` `(string: "5s")` – Specifies the connection timeout to use.
+
+TLS works as follows:
+
+- If `tls` is set to true, the connection will use TLS; this happens
+ automatically if `pem_bundle`, `pem_json`, or `insecure_tls` is set
+
+- If `insecure_tls` is set to true, the connection will not perform verification
+ of the server certificate; this also sets `tls` to true
+
+- If only `issuing_ca` is set in `pem_json`, or the only certificate in
+ `pem_bundle` is a CA certificate, the given CA certificate will be used for
+ server certificate verification; otherwise the system CA certificates will be
+ used
+
+- If `certificate` and `private_key` are set in `pem_bundle` or `pem_json`,
+ client auth will be turned on for the connection
+
+`pem_bundle` should be a PEM-concatenated bundle of a private key + client
+certificate, an issuing CA certificate, or both. `pem_json` should contain the
+same information; for convenience, the JSON format is the same as that output by
+the issue command from the PKI backend.
+
+### Sample Payload
+
+```json
+{
+ "hosts": "cassandra1.local",
+ "username": "user",
+ "password": "pass"
+}
+```
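+
+As a sketch, a TLS-enabled configuration might additionally supply a
+`pem_bundle`; the certificate contents below are placeholders:
+
+```json
+{
+  "hosts": "cassandra1.local",
+  "username": "user",
+  "password": "pass",
+  "tls": true,
+  "pem_bundle": "-----BEGIN CERTIFICATE-----\n..."
+}
+```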
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/cassandra/config/connection
+```
+
+## Create Role
+
+This endpoint creates or updates the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/cassandra/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `creation_cql` `(string: "")` – Specifies the CQL statements executed to
+ create and configure the new user. Must be a semicolon-separated string, a
+ base64-encoded semicolon-separated string, a serialized JSON string array, or
+ a base64-encoded serialized JSON string array. The '{{username}}' and
+ '{{password}}' values will be substituted; it is required that these
+ parameters are in single quotes. The default creates a non-superuser user with
+ no authorization grants.
+
+- `rollback_cql` `(string: "")` – Specifies the CQL statements executed to
+ attempt a rollback if an error is encountered during user creation. The
+ default is to delete the user. Must be a semicolon-separated string, a
+ base64-encoded semicolon-separated string, a serialized JSON string array, or
+ a base64-encoded serialized JSON string array. The '{{username}}' and
+ '{{password}}' values will be substituted; it is required that these
+ parameters are in single quotes.
+
+- `lease` `(string: "")` – Specifies the lease value provided as a string
+  duration with time suffix. "h" (hour) is the largest suffix.
+
+- `consistency` `(string: "Quorum")` – Specifies the consistency level value
+ provided as a string. Determines the consistency level used for operations
+ performed on the Cassandra database.
+
+### Sample Payload
+
+```json
+{
+ "creation_cql": "CREATE USER ..."
+}
+```
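+
+As a fuller sketch, using the documented `{{username}}` and `{{password}}`
+substitutions (the grant shown is an example, not a recommendation):
+
+```json
+{
+  "creation_cql": "CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER; GRANT SELECT ON ALL KEYSPACES TO '{{username}}';"
+}
+```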
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/cassandra/roles/my-role
+```
+
+## Read Role
+
+This endpoint queries the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/cassandra/roles/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to read. This
+ is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/cassandra/roles/my-role
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "creation_cql": "CREATE USER...",
+ "rollback_cql": "DROP USER...",
+ "lease": "12h",
+ "consistency": "Quorum"
+ }
+}
+```
+
+## Delete Role
+
+This endpoint deletes the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/cassandra/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to delete. This
+ is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/cassandra/roles/my-role
+```
+
+## Generate Credentials
+
+This endpoint generates a new set of dynamic credentials based on the named
+role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/cassandra/creds/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to create
+ credentials against. This is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/cassandra/creds/my-role
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "username": "vault-root-1430158508-126",
+ "password": "132ae3ef-5a64-7499-351e-bfe59f3a2a21"
+ }
+}
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/consul/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/consul/index.html.md
new file mode 100644
index 0000000..b91e956
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/consul/index.html.md
@@ -0,0 +1,230 @@
+---
+layout: "api"
+page_title: "Consul Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-consul"
+description: |-
+ This is the API documentation for the Vault Consul secret backend.
+---
+
+# Consul Secret Backend HTTP API
+
+This is the API documentation for the Vault Consul secret backend. For general
+information about the usage and operation of the Consul backend, please see the
+[Vault Consul backend documentation](/docs/secrets/consul/index.html).
+
+This documentation assumes the Consul backend is mounted at the `/consul` path
+in Vault. Since it is possible to mount secret backends at any location, please
+update your API calls accordingly.
+
+## Configure Access
+
+This endpoint configures the access information for Consul. This access
+information is used so that Vault can communicate with Consul and generate
+Consul tokens.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/consul/config/access` | `204 (empty body)` |
+
+### Parameters
+
+- `address` `(string: <required>)` – Specifies the address of the Consul
+ instance, provided as `"host:port"` like `"127.0.0.1:8500"`.
+
+- `scheme` `(string: "http")` – Specifies the URL scheme to use.
+
+- `token` `(string: <required>)` – Specifies the Consul ACL token to use. This
+ must be a management type token.
+
+### Sample Payload
+
+```json
+{
+ "address": "127.0.0.1:8500",
+ "scheme": "https",
+ "token": "adha..."
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --request POST \
+ --header "X-Vault-Token: ..." \
+ --data @payload.json \
+ https://vault.rocks/v1/consul/config/access
+```
+
+## Create/Update Role
+
+This endpoint creates or updates the Consul role definition. If the role does
+not exist, it will be created. If the role already exists, it will receive
+updated attributes.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/consul/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of an existing role against
+ which to create this Consul credential. This is part of the request URL.
+
+- `lease` `(string: "")` – Specifies the lease for this role. This is provided
+ as a string duration with a time suffix like `"30s"` or `"1h"`. If not
+ provided, the default Vault lease is used.
+
+- `policy` `(string: <required>)` – Specifies the base64 encoded ACL policy. The
+ ACL format can be found in the [Consul ACL
+ documentation](https://www.consul.io/docs/internals/acl.html). This is
+ required unless the `token_type` is `management`.
+
+- `token_type` `(string: "client")` - Specifies the type of token to create when
+ using this role. Valid values are `"client"` or `"management"`.
+
+### Sample Payload
+
+To create management tokens:
+
+```json
+{
+ "token_type": "management"
+}
+```
+
+To create a client token with a custom policy:
+
+```json
+{
+ "policy": "abd2...=="
+}
+```
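+
+The `policy` value must be base64 encoded before it is sent. As a sketch, a
+read-only Consul key policy could be encoded like this:
+
+```shell
+$ base64 <<< 'key "" { policy = "read" }'
+```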
+
+### Sample Request
+
+```
+$ curl \
+ --request POST \
+ --header "X-Vault-Token: ..." \
+ --data @payload.json \
+ https://vault.rocks/v1/consul/roles/example-role
+```
+
+## Read Role
+
+This endpoint queries for information about a Consul role with the given name.
+If no role exists with that name, a 404 is returned.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/consul/roles/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to query. This
+ is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/consul/roles/example-role
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "policy": "abd2...==",
+ "lease": "1h0m0s",
+ "token_type": "client"
+ }
+}
+```
+
+## List Roles
+
+This endpoint lists all existing roles in the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/consul/roles` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/consul/roles
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "keys": [
+ "example-role"
+ ]
+ }
+}
+```
+
+## Delete Role
+
+This endpoint deletes a Consul role with the given name. Even if the role does
+not exist, this endpoint will still return a successful response.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/consul/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to delete. This
+ is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+ --request DELETE \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/consul/roles/example-role
+```
+
+## Generate Credential
+
+This endpoint generates a dynamic Consul token based on the given role
+definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/consul/creds/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of an existing role against
+ which to create this Consul credential. This is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/consul/creds/example-role
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "token": "973a31ea-1ec4-c2de-0f63-623f477c2510"
+ }
+}
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/cubbyhole/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/cubbyhole/index.html.md
new file mode 100644
index 0000000..903baaa
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/cubbyhole/index.html.md
@@ -0,0 +1,155 @@
+---
+layout: "api"
+page_title: "Cubbyhole Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-cubbyhole"
+description: |-
+ This is the API documentation for the Vault Cubbyhole secret backend.
+---
+
+# Cubbyhole Secret Backend HTTP API
+
+This is the API documentation for the Vault Cubbyhole secret backend. For
+general information about the usage and operation of the Cubbyhole backend,
+please see the
+[Vault Cubbyhole backend documentation](/docs/secrets/cubbyhole/index.html).
+
+This documentation assumes the Cubbyhole backend is mounted at the `/cubbyhole`
+path in Vault. Since it is possible to mount secret backends at any location,
+please update your API calls accordingly.
+
+## Read Secret
+
+This endpoint retrieves the secret at the specified location.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/cubbyhole/:path` | `200 application/json` |
+
+### Parameters
+
+- `path` `(string: <required>)` – Specifies the path of the secret to read.
+ This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/cubbyhole/my-secret
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "data": {
+ "foo": "bar"
+ },
+ "lease_duration": 0,
+ "lease_id": "",
+ "renewable": false
+}
+```
+
+## List Secrets
+
+This endpoint returns a list of secret entries at the specified location.
+Folders are suffixed with `/`. The input must be a folder; listing a file will
+not return a value. The values themselves are not accessible via this command.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST`   | `/cubbyhole/:path`           | `200 application/json` |
+
+### Parameters
+
+- `path` `(string: <required>)` – Specifies the path of the secrets to list.
+ This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/cubbyhole/my-secret
+```
+
+### Sample Response
+
+The example below shows output for a query path of `cubbyhole/` when there are
+secrets at `cubbyhole/foo` and `cubbyhole/foo/bar`; note the difference in the
+two entries.
+
+```json
+{
+ "auth": null,
+ "data": {
+ "keys": ["foo", "foo/"]
+ },
+ "lease_duration": 2764800,
+ "lease_id": "",
+ "renewable": false
+}
+```
+
+## Create/Update Secret
+
+This endpoint stores a secret at the specified location.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/cubbyhole/:path` | `204 (empty body)` |
+| `PUT` | `/cubbyhole/:path` | `204 (empty body)` |
+
+### Parameters
+
+- `path` `(string: <required>)` – Specifies the path of the secrets to
+  create/update. This is specified as part of the URL.
+
+- `:key` `(string: "")` – Specifies a key, paired with an associated value, to
+  be held at the given location. Multiple key/value pairs can be specified, and
+  all will be returned on a read operation. A key called `ttl` will trigger
+  some special behavior; see the backend documentation for details.
+
+### Sample Payload
+
+```json
+{
+ "foo": "bar",
+ "zip": "zap"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/cubbyhole/my-secret
+```
+
+## Delete Secret
+
+This endpoint deletes the secret at the specified location.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/cubbyhole/:path` | `204 (empty body)` |
+
+### Parameters
+
+- `path` `(string: <required>)` – Specifies the path of the secret to delete.
+ This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/cubbyhole/my-secret
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/generic/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/generic/index.html.md
new file mode 100644
index 0000000..be00171
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/generic/index.html.md
@@ -0,0 +1,159 @@
+---
+layout: "api"
+page_title: "Generic Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-generic"
+description: |-
+ This is the API documentation for the Vault Generic secret backend.
+---
+
+# Generic Secret Backend HTTP API
+
+This is the API documentation for the Vault Generic secret backend. For general
+information about the usage and operation of the Generic backend, please see
+the [Vault Generic backend documentation](/docs/secrets/generic/index.html).
+
+This documentation assumes the Generic backend is mounted at the `/secret`
+path in Vault. Since it is possible to mount secret backends at any location,
+please update your API calls accordingly.
+
+## Read Secret
+
+This endpoint retrieves the secret at the specified location.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/secret/:path` | `200 application/json` |
+
+### Parameters
+
+- `path` `(string: <required>)` – Specifies the path of the secret to read.
+ This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/secret/my-secret
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "data": {
+ "foo": "bar"
+ },
+ "lease_duration": 2764800,
+ "lease_id": "",
+ "renewable": false
+}
+```
+
+## List Secrets
+
+This endpoint returns a list of key names at the specified location. Folders
+are suffixed with `/`. The input must be a folder; listing a file will not
+return a value. Note that no policy-based filtering is performed on keys; do
+not encode sensitive information in key names. The values themselves are not
+accessible via this command.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/secret/:path` | `200 application/json` |
+
+### Parameters
+
+- `path` `(string: <required>)` – Specifies the path of the secrets to list.
+ This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/secret/my-secret
+```
+
+### Sample Response
+
+The example below shows output for a query path of `secret/` when there are
+secrets at `secret/foo` and `secret/foo/bar`; note the difference in the two
+entries.
+
+```json
+{
+ "auth": null,
+ "data": {
+ "keys": ["foo", "foo/"]
+ },
+ "lease_duration": 2764800,
+ "lease_id": "",
+ "renewable": false
+}
+```
+
+## Create/Update Secret
+
+This endpoint stores a secret at the specified location. If the value does not
+yet exist, the calling token must have an ACL policy granting the `create`
+capability. If the value already exists, the calling token must have an ACL
+policy granting the `update` capability.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/secret/:path` | `204 (empty body)` |
+| `PUT` | `/secret/:path` | `204 (empty body)` |
+
+### Parameters
+
+- `path` `(string: <required>)` – Specifies the path of the secrets to
+  create/update. This is specified as part of the URL.
+
+- `:key` `(string: "")` – Specifies a key, paired with an associated value, to
+  be held at the given location. Multiple key/value pairs can be specified, and
+  all will be returned on a read operation. A key called `ttl` will trigger
+  some special behavior; see the
+  [Vault Generic backend documentation](/docs/secrets/generic/index.html) for
+  details.
+
+### Sample Payload
+
+```json
+{
+ "foo": "bar",
+ "zip": "zap"
+}
+```
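+
+As a sketch, a payload may also include the special `ttl` key to suggest a
+lease duration for the secret:
+
+```json
+{
+  "foo": "bar",
+  "ttl": "30m"
+}
+```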
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/secret/my-secret
+```
+
+## Delete Secret
+
+This endpoint deletes the secret at the specified location.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/secret/:path` | `204 (empty body)` |
+
+### Parameters
+
+- `path` `(string: <required>)` – Specifies the path of the secret to delete.
+ This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/secret/my-secret
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/index.html.md
new file mode 100644
index 0000000..2912451
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/index.html.md
@@ -0,0 +1,19 @@
+---
+layout: "api"
+page_title: "HTTP API"
+sidebar_current: "docs-http-secret"
+description: |-
+ Each secret backend publishes its own set of API paths and methods. These
+ endpoints are documented in this section.
+---
+
+# Secret Backends
+
+Each secret backend publishes its own set of API paths and methods. These
+endpoints are documented in this section. Secret backends are mounted at a path,
+but the documentation will assume the default mount points for simplicity. If
+you are mounting at a different path, you should adjust your API calls
+accordingly.
+
+For the API documentation for a specific secret backend, please choose a secret
+backend from the navigation.
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/mongodb/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/mongodb/index.html.md
new file mode 100644
index 0000000..e833b5c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/mongodb/index.html.md
@@ -0,0 +1,344 @@
+---
+layout: "api"
+page_title: "MongoDB Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-mongodb"
+description: |-
+ This is the API documentation for the Vault MongoDB secret backend.
+---
+
+# MongoDB Secret Backend HTTP API
+
+This is the API documentation for the Vault MongoDB secret backend. For general
+information about the usage and operation of the MongoDB backend, please see
+the [Vault MongoDB backend documentation](/docs/secrets/mongodb/index.html).
+
+This documentation assumes the MongoDB backend is mounted at the `/mongodb`
+path in Vault. Since it is possible to mount secret backends at any location,
+please update your API calls accordingly.
+
+## Configure Connection
+
+This endpoint configures the standard connection string (URI) used to
+communicate with MongoDB.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/mongodb/config/connection` | `200 application/json` |
+
+### Parameters
+
+- `url` `(string: <required>)` – Specifies the MongoDB standard connection
+ string (URI).
+
+- `verify_connection` `(bool: true)` – Specifies if the connection is verified
+ during initial configuration.
+
+### Sample Payload
+
+```json
+{
+ "url": "mongodb://db1.example.net,db2.example.net:2500/?replicaSet=test"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/mongodb/config/connection
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": null,
+ "wrap_info": null,
+ "warnings": [
+ "Read access to this endpoint should be controlled via ACLs as it will return the connection URI as it is, including passwords, if any."
+ ],
+ "auth": null
+}
+```
+
+## Read Connection
+
+This endpoint queries the connection configuration. Access to this endpoint
+should be controlled via ACLs as it will return the connection URI as it is,
+including passwords, if any.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/mongodb/config/connection` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/mongodb/config/connection
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": {
+ "uri": "mongodb://admin:Password!@mongodb.acme.com:27017/admin?ssl=true"
+ },
+ "wrap_info": null,
+ "warnings": null,
+ "auth": null
+}
+```
+
+## Configure Lease
+
+This endpoint configures the default lease TTL settings for credentials
+generated by the mongodb backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/mongodb/config/lease` | `204 (empty body)` |
+
+### Parameters
+
+- `lease` `(string: <required>)` – Specifies the lease value provided as a
+  string duration with time suffix. "h" (hour) is the largest suffix.
+
+- `lease_max` `(string: <required>)` – Specifies the maximum lease value
+  provided as a string duration with time suffix. "h" (hour) is the largest
+  suffix.
+
+### Sample Payload
+
+```json
+{
+ "lease": "12h",
+ "lease_max": "24h"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/mongodb/config/lease
+```
+
+## Read Lease
+
+This endpoint queries the lease configuration.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/mongodb/config/lease` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/mongodb/config/lease
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": {
+ "max_ttl": 60,
+ "ttl": 60
+ },
+ "wrap_info": null,
+ "warnings": null,
+ "auth": null
+}
+```
+
+## Create Role
+
+This endpoint creates or updates a role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/mongodb/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `db` `(string: <required>)` – Specifies the name of the database users should
+ be created in for this role.
+
+- `roles` `(string: "")` – Specifies the MongoDB roles to assign to the users
+ generated for this role.
+
+### Sample Payload
+
+```json
+{
+ "db": "my-db",
+ "roles": "[\"readWrite\",{\"db\":\"bar\",\"role\":\"read\"}]"
+}
+```
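+
+Note that the `roles` value is itself a JSON array serialized into a string.
+Decoded, the payload above assigns these roles (shown unescaped for
+readability):
+
+```json
+["readWrite", { "db": "bar", "role": "read" }]
+```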
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/mongodb/roles/my-role
+```
+
+## Read Role
+
+This endpoint queries the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/mongodb/roles/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to read. This
+ is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/mongodb/roles/my-role
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": {
+ "db": "foo",
+ "roles": "[\"readWrite\",{\"db\":\"bar\",\"role\":\"read\"}]"
+ },
+ "wrap_info": null,
+ "warnings": null,
+ "auth": null
+}
+```
+
+## List Roles
+
+This endpoint returns a list of available roles. Only the role names are
+returned, not any values.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/mongodb/roles` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/mongodb/roles
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": {
+ "keys": [
+ "dev",
+ "prod"
+ ]
+ },
+ "wrap_info": null,
+ "warnings": null,
+ "auth": null
+}
+```
+
+## Delete Role
+
+This endpoint deletes the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/mongodb/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to delete. This
+ is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/mongodb/roles/my-role
+```
+
+## Generate Credentials
+
+This endpoint generates a new set of dynamic credentials based on the named
+role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/mongodb/creds/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to create
+ credentials against. This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/mongodb/creds/my-role
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "mongodb/creds/readonly/e64e79d8-9f56-e379-a7c5-373f9b4ee3d8",
+ "renewable": true,
+ "lease_duration": 3600,
+ "data": {
+ "db": "foo",
+ "password": "de0f7b50-d700-54e5-4e81-5c3724283999",
+ "username": "vault-token-b32098cb-7ff2-dcf5-83cd-d5887cedf81b"
+ },
+ "wrap_info": null,
+ "warnings": null,
+ "auth": null
+}
+```
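+
+As a usage sketch, credentials issued this way can be revoked before the lease
+expires via Vault's generic lease revocation endpoint, passing the `lease_id`
+from the response (the lease ID below is the one from the sample above; use
+your own):
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request PUT \
+    https://vault.rocks/v1/sys/revoke/mongodb/creds/readonly/e64e79d8-9f56-e379-a7c5-373f9b4ee3d8
+```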
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/mssql/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/mssql/index.html.md
new file mode 100644
index 0000000..678eea5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/mssql/index.html.md
@@ -0,0 +1,244 @@
+---
+layout: "api"
+page_title: "MSSQL Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-mssql"
+description: |-
+ This is the API documentation for the Vault MSSQL secret backend.
+---
+
+# MSSQL Secret Backend HTTP API
+
+This is the API documentation for the Vault MSSQL secret backend. For general
+information about the usage and operation of the MSSQL backend, please see
+the [Vault MSSQL backend documentation](/docs/secrets/mssql/index.html).
+
+This documentation assumes the MSSQL backend is mounted at the `/mssql`
+path in Vault. Since it is possible to mount secret backends at any location,
+please update your API calls accordingly.
+
+## Configure Connection
+
+This endpoint configures the connection DSN used to communicate with Microsoft
+SQL Server.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/mssql/config/connection` | `204 (empty body)` |
+
+### Parameters
+
+- `connection_string` `(string: <required>)` – Specifies the MSSQL DSN.
+
+- `max_open_connections` `(int: 2)` – Specifies the maximum number of open
+ connections to the database.
+
+- `max_idle_connections` `(int: 0)` – Specifies the maximum number of idle
+  connections to the database. A value of zero uses the value of
+  `max_open_connections`, and a negative value disables idle connections. If
+  this is larger than `max_open_connections`, it will be reduced to match.
+
+### Sample Payload
+
+```json
+{
+ "connection_string": "Server=myServerAddress;Database=myDataBase;User Id=myUsername; Password=myPassword;"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/mssql/config/connection
+```
+
+## Configure Lease
+
+This endpoint configures the lease settings for generated credentials.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST`   | `/mssql/config/lease`        | `204 (empty body)`     |
+
+### Parameters
+
+- `lease` `(string: <required>)` – Specifies the lease value provided as a
+ string duration with time suffix. "h" (hour) is the largest suffix.
+
+- `lease_max` `(string: <required>)` – Specifies the maximum lease value
+ provided as a string duration with time suffix. "h" (hour) is the largest
+ suffix.
+
+### Sample Payload
+
+```json
+{
+ "lease": "12h",
+ "lease_max": "24h"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/mssql/config/lease
+```
+
+## Create Role
+
+This endpoint creates or updates the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/mssql/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `sql` `(string: <required>)` – Specifies the SQL statements executed to create
+ and configure the role. The '{{name}}' and '{{password}}' values will be
+ substituted. Must be a semicolon-separated string, a base64-encoded
+ semicolon-separated string, a serialized JSON string array, or a
+ base64-encoded serialized JSON string array.
+
+### Sample Payload
+
+```json
+{
+ "sql": "CREATE LOGIN ..."
+}
+```
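+
+The payload above elides the actual statements. As an illustrative sketch only
+(not the backend's canonical statements), a creation block might look like the
+following, with `{{name}}` and `{{password}}` substituted by Vault at issue
+time:
+
+```json
+{
+  "sql": "CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}'; CREATE USER [{{name}}] FOR LOGIN [{{name}}]; GRANT SELECT ON SCHEMA::dbo TO [{{name}}];"
+}
+```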
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/mssql/roles/my-role
+```
+
+## Read Role
+
+This endpoint queries the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/mssql/roles/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to read. This
+ is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/mssql/roles/my-role
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "sql": "CREATE LOGIN..."
+ }
+}
+```
+
+## List Roles
+
+This endpoint returns a list of available roles. Only the role names are
+returned, not any values.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/mssql/roles` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/mssql/roles
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "data": {
+ "keys": ["dev", "prod"]
+ },
+ "lease_duration": 2764800,
+ "lease_id": "",
+ "renewable": false
+}
+```
+
+## Delete Role
+
+This endpoint deletes the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/mssql/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to delete. This
+ is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/mssql/roles/my-role
+```
+
+## Generate Credentials
+
+This endpoint generates a new set of dynamic credentials based on the named
+role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/mssql/creds/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to create
+ credentials against. This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/mssql/creds/my-role
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "username": "root-a147d529-e7d6-4a16-8930-4c3e72170b19",
+ "password": "ee202d0d-e4fd-4410-8d14-2a78c5c8cb76"
+ }
+}
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/mysql/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/mysql/index.html.md
new file mode 100644
index 0000000..1d4bb90
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/mysql/index.html.md
@@ -0,0 +1,265 @@
+---
+layout: "api"
+page_title: "MySQL Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-mysql"
+description: |-
+ This is the API documentation for the Vault MySQL secret backend.
+---
+
+# MySQL Secret Backend HTTP API
+
+This is the API documentation for the Vault MySQL secret backend. For general
+information about the usage and operation of the MySQL backend, please see
+the [Vault MySQL backend documentation](/docs/secrets/mysql/index.html).
+
+This documentation assumes the MySQL backend is mounted at the `/mysql`
+path in Vault. Since it is possible to mount secret backends at any location,
+please update your API calls accordingly.
+
+## Configure Connection
+
+This endpoint configures the connection DSN used to communicate with MySQL.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/mysql/config/connection` | `204 (empty body)` |
+
+### Parameters
+
+- `connection_url` `(string: <required>)` – Specifies the MySQL DSN.
+
+- `max_open_connections` `(int: 2)` – Specifies the maximum number of open
+ connections to the database.
+
+- `max_idle_connections` `(int: 0)` – Specifies the maximum number of idle
+  connections to the database. A value of zero uses the value of
+  `max_open_connections`, and a negative value disables idle connections. If
+  this is larger than `max_open_connections`, it will be reduced to match.
+
+- `verify_connection` `(bool: true)` – Specifies if the connection is verified
+ during initial configuration.
+
+### Sample Payload
+
+```json
+{
+ "connection_url": "mysql:host=localhost;dbname=testdb"
+}
+```
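+
+Since the backend is implemented in Go, DSNs following the Go MySQL driver
+convention are also commonly used. A sketch with hypothetical host and
+credentials:
+
+```json
+{
+  "connection_url": "vault-admin:superSecret@tcp(db.example.com:3306)/"
+}
+```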
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/mysql/config/connection
+```
+
+## Configure Lease
+
+This endpoint configures the lease settings for generated credentials. If not
+configured, leases default to 1 hour. This is a root-protected endpoint.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/mysql/config/lease` | `204 (empty body)` |
+
+### Parameters
+
+- `lease` `(string: <required>)` – Specifies the lease value provided as a
+ string duration with time suffix. "h" (hour) is the largest suffix.
+
+- `lease_max` `(string: <required>)` – Specifies the maximum lease value
+ provided as a string duration with time suffix. "h" (hour) is the largest
+ suffix.
+
+### Sample Payload
+
+```json
+{
+ "lease": "12h",
+ "lease_max": "24h"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/mysql/config/lease
+```
+
+## Create Role
+
+This endpoint creates or updates the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/mysql/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `sql` `(string: <required>)` – Specifies the SQL statements executed to create
+ and configure a user. Must be a semicolon-separated string, a base64-encoded
+ semicolon-separated string, a serialized JSON string array, or a
+ base64-encoded serialized JSON string array. The '{{name}}' and
+ '{{password}}' values will be substituted.
+
+- `revocation_sql` `(string: "")` – Specifies the SQL statements executed to
+ revoke a user. Must be a semicolon-separated string, a base64-encoded
+ semicolon-separated string, a serialized JSON string array, or a
+ base64-encoded serialized JSON string array. The '{{name}}' value will be
+ substituted.
+
+- `rolename_length` `(int: 4)` – Specifies how many characters from the role
+  name will be used to form the MySQL username interpolated into the
+  '{{name}}' field of the `sql` parameter.
+
+- `displayname_length` `(int: 4)` – Specifies how many characters from the
+  token display name will be used to form the MySQL username interpolated into
+  the '{{name}}' field of the `sql` parameter.
+
+- `username_length` `(int: 16)` – Specifies the maximum total length in
+  characters of the MySQL username interpolated into the '{{name}}' field of
+  the `sql` parameter.
+
+### Sample Payload
+
+```json
+{
+ "sql": "CREATE USER ..."
+}
+```
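+
+A fuller payload exercising the optional parameters might look like this (the
+SQL shown is illustrative only):
+
+```json
+{
+  "sql": "CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; GRANT SELECT ON *.* TO '{{name}}'@'%';",
+  "revocation_sql": "DROP USER '{{name}}'@'%';",
+  "username_length": 16
+}
+```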
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/mysql/roles/my-role
+```
+
+## Read Role
+
+This endpoint queries the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/mysql/roles/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to read. This
+ is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/mysql/roles/my-role
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "sql": "CREATE USER..."
+ }
+}
+```
+
+## List Roles
+
+This endpoint returns a list of available roles. Only the role names are
+returned, not any values.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/mysql/roles` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/mysql/roles
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "data": {
+ "keys": ["dev", "prod"]
+ },
+ "lease_duration": 2764800,
+ "lease_id": "",
+ "renewable": false
+}
+```
+
+## Delete Role
+
+This endpoint deletes the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/mysql/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to delete. This
+ is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/mysql/roles/my-role
+```
+
+## Generate Credentials
+
+This endpoint generates a new set of dynamic credentials based on the named
+role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/mysql/creds/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to create
+ credentials against. This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/mysql/creds/my-role
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "username": "user-role-aefa63",
+ "password": "132ae3ef-5a64-7499-351e-bfe59f3a2a21"
+ }
+}
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/pki/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/pki/index.html.md
new file mode 100644
index 0000000..37eadb0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/pki/index.html.md
@@ -0,0 +1,1219 @@
+---
+layout: "api"
+page_title: "PKI Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-pki"
+description: |-
+ This is the API documentation for the Vault PKI secret backend.
+---
+
+# PKI Secret Backend HTTP API
+
+This is the API documentation for the Vault PKI secret backend. For general
+information about the usage and operation of the PKI backend, please see the
+[Vault PKI backend documentation](/docs/secrets/pki/index.html).
+
+This documentation assumes the PKI backend is mounted at the `/pki` path in
+Vault. Since it is possible to mount secret backends at any location, please
+update your API calls accordingly.
+
+## Read CA Certificate
+
+This endpoint retrieves the CA certificate *in raw DER-encoded form*. This is a
+bare endpoint that does not return a standard Vault data structure. If `/pem` is
+added to the endpoint, the CA certificate is returned in PEM format.
+
+This is an unauthenticated endpoint.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/pki/ca(/pem)` | `200 application/binary` |
+
+### Sample Request
+
+```
+$ curl \
+ https://vault.rocks/v1/pki/ca/pem
+```
+
+### Sample Response
+
+```
+<PEM-encoded certificate>
+```
+
+## Read CA Certificate Chain
+
+This endpoint retrieves the CA certificate chain, including the CA _in PEM
+format_. This is a bare endpoint that does not return a standard Vault data
+structure.
+
+This is an unauthenticated endpoint.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/pki/ca_chain` | `200 application/binary` |
+
+### Sample Request
+
+```
+$ curl \
+ https://vault.rocks/v1/pki/ca_chain
+```
+
+### Sample Response
+
+```
+<PEM-encoded certificate chain>
+```
+
+## Read Certificate
+
+This endpoint retrieves one of a selection of certificates. The certificate is
+returned in PEM format in the `certificate` key of the JSON object.
+
+This is an unauthenticated endpoint.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/pki/cert/:serial` | `200 application/json` |
+
+### Parameters
+
+- `serial` `(string: <required>)` – Specifies the serial of the certificate to
+  read. This is part of the request URL. Valid values are:
+
+  - `ca` for the CA certificate
+  - `crl` for the current CRL
+  - `ca_chain` for the CA trust chain
+  - a serial number in either hyphen-separated or colon-separated octal format
+
+### Sample Request
+
+```
+$ curl \
+ https://vault.rocks/v1/pki/cert/crl
+```
+
+### Sample Response
+```json
+{
+ "data": {
+ "certificate": "-----BEGIN CERTIFICATE-----\nMIIGmDCCBYCgAwIBAgIHBzEB3fTzhTANBgkqhkiG9w0BAQsFADCBjDELMAkGA1UE\n..."
+ }
+}
+```
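+
+To read an individual certificate instead, pass its serial number in
+hyphen-separated or colon-separated octal format, for example using one of the
+serials returned by the List Certificates endpoint below:
+
+```
+$ curl \
+    https://vault.rocks/v1/pki/cert/17:67:16:b0:b9:45:58:c0:3a:29:e3:cb:d6:98:33:7a:a6:3b:66:c1
+```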
+
+
+## List Certificates
+
+This endpoint returns a list of the current certificates by serial number only.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/pki/certs` | `200 application/json` |
+
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/pki/certs
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id":"",
+ "renewable":false,
+ "lease_duration":0,
+ "data":{
+ "keys":[
+ "17:67:16:b0:b9:45:58:c0:3a:29:e3:cb:d6:98:33:7a:a6:3b:66:c1",
+ "26:0f:76:93:73:cb:3f:a0:7a:ff:97:85:42:48:3a:aa:e5:96:03:21"
+ ]
+ },
+ "wrap_info":null,
+ "warnings":null,
+ "auth":null
+}
+```
+
+## Submit CA Information
+
+This endpoint allows submitting the CA information for the backend via a PEM
+file containing the CA certificate and its private key, concatenated. This is
+not needed if you are generating a self-signed root certificate, and it is not
+used if you have a signed intermediate CA certificate with a generated key (use
+the `/pki/intermediate/set-signed` endpoint for that). _If you have already set
+a certificate and key, they will be overridden._
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/pki/config/ca` | `204 (empty body)` |
+
+### Parameters
+
+- `pem_bundle` `(string: <required>)` – Specifies the key and certificate concatenated in PEM format.
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request POST \
+    --data @payload.json \
+    https://vault.rocks/v1/pki/config/ca
+```
+
+Note that if you provide the data through the HTTP API it must be
+JSON-formatted, with newlines replaced with `\n`, like so:
+
+```json
+{
+ "pem_bundle": "-----BEGIN RSA PRIVATE KEY-----\n...\n-----END CERTIFICATE-----"
+}
+```
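+
+One way to build such a payload from a PEM bundle on disk is with `jq`, which
+handles the newline escaping automatically (a minimal sketch, assuming the
+concatenated key and certificate live in a local file named `ca_bundle.pem`):
+
+```
+$ jq -n --arg pem "$(cat ca_bundle.pem)" '{pem_bundle: $pem}' > payload.json
+```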
+
+## Read CRL Configuration
+
+This endpoint allows getting the duration for which the generated CRL should be
+marked valid.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/pki/config/crl` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/pki/config/crl
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": {
+ "expiry": "72h"
+ },
+ "auth": null
+}
+```
+
+## Set CRL Configuration
+
+This endpoint allows setting the duration for which the generated CRL should be
+marked valid.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/pki/config/crl` | `204 (empty body)` |
+
+### Parameters
+
+- `expiry` `(string: "72h")` – Specifies the time until expiration.
+
+### Sample Payload
+
+```json
+{
+ "expiry": "48h"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/pki/config/crl
+```
+
+## Read URLs
+
+This endpoint fetches the URLs to be encoded in generated certificates.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/pki/config/urls` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/pki/config/urls
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": {
+ "issuing_certificates": ["", ""],
+ "crl_distribution_points": ["", "