diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
new file mode 100644
index 0000000..ed0e5ee
--- /dev/null
+++ b/Godeps/Godeps.json
@@ -0,0 +1,87 @@
+{
+ "ImportPath": "github.com/Luzifer/cloudkeys-go",
+ "GoVersion": "go1.4.2",
+ "Deps": [
+ {
+ "ImportPath": "github.com/Luzifer/rconfig",
+ "Rev": "0c78105a26af5663b6bb2c5be1fed4ed7d81d687"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/aws",
+ "Comment": "v0.6.4-2-g168a70b",
+ "Rev": "168a70b9c21a4f60166d7925b690356605907adb"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/internal/endpoints",
+ "Comment": "v0.6.4-2-g168a70b",
+ "Rev": "168a70b9c21a4f60166d7925b690356605907adb"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/query",
+ "Comment": "v0.6.4-2-g168a70b",
+ "Rev": "168a70b9c21a4f60166d7925b690356605907adb"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/rest",
+ "Comment": "v0.6.4-2-g168a70b",
+ "Rev": "168a70b9c21a4f60166d7925b690356605907adb"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restxml",
+ "Comment": "v0.6.4-2-g168a70b",
+ "Rev": "168a70b9c21a4f60166d7925b690356605907adb"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil",
+ "Comment": "v0.6.4-2-g168a70b",
+ "Rev": "168a70b9c21a4f60166d7925b690356605907adb"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/internal/signer/v4",
+ "Comment": "v0.6.4-2-g168a70b",
+ "Rev": "168a70b9c21a4f60166d7925b690356605907adb"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/s3",
+ "Comment": "v0.6.4-2-g168a70b",
+ "Rev": "168a70b9c21a4f60166d7925b690356605907adb"
+ },
+ {
+ "ImportPath": "github.com/flosch/pongo2",
+ "Comment": "v1.0-rc1-154-g40edabd",
+ "Rev": "40edabd47a6af304367c79b67afaa57faf1b79c8"
+ },
+ {
+ "ImportPath": "github.com/gorilla/context",
+ "Rev": "215affda49addc4c8ef7e2534915df2c8c35c6cd"
+ },
+ {
+ "ImportPath": "github.com/gorilla/mux",
+ "Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5"
+ },
+ {
+ "ImportPath": "github.com/gorilla/securecookie",
+ "Rev": "1b0c7f6e9ab3d7f500fd7d50c7ad835ff428139b"
+ },
+ {
+ "ImportPath": "github.com/gorilla/sessions",
+ "Rev": "aa5e036e6c44aec69a32eb41097001978b29ad31"
+ },
+ {
+ "ImportPath": "github.com/satori/go.uuid",
+ "Rev": "242673bbc820e051ef00033e274d32e08ece9e15"
+ },
+ {
+ "ImportPath": "github.com/spf13/pflag",
+ "Rev": "67cbc198fd11dab704b214c1e629a97af392c085"
+ },
+ {
+ "ImportPath": "github.com/vaughan0/go-ini",
+ "Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
+ },
+ {
+ "ImportPath": "github.com/xuyu/goredis",
+ "Rev": "300f7e8cf453e2ea44337b3969d3aecf1a92ebe3"
+ }
+ ]
+}
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore
new file mode 100644
index 0000000..f037d68
--- /dev/null
+++ b/Godeps/_workspace/.gitignore
@@ -0,0 +1,2 @@
+/pkg
+/bin
diff --git a/Godeps/_workspace/src/github.com/Luzifer/rconfig/LICENSE b/Godeps/_workspace/src/github.com/Luzifer/rconfig/LICENSE
new file mode 100644
index 0000000..4fde5d2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Luzifer/rconfig/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 Knut Ahlers
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/Luzifer/rconfig/README.md b/Godeps/_workspace/src/github.com/Luzifer/rconfig/README.md
new file mode 100644
index 0000000..cebe16a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Luzifer/rconfig/README.md
@@ -0,0 +1,58 @@
+[![Circle CI](https://circleci.com/gh/Luzifer/rconfig.svg?style=svg)](https://circleci.com/gh/Luzifer/rconfig)
+[![License: Apache v2.0](https://badge.luzifer.io/v1/badge?color=5d79b5&title=license&text=Apache+v2.0)](http://www.apache.org/licenses/LICENSE-2.0)
+[![Documentation](https://badge.luzifer.io/v1/badge?title=godoc&text=reference)](https://godoc.org/github.com/Luzifer/rconfig)
+
+## Description
+
+> Package rconfig implements a CLI configuration reader with struct-embedded defaults, environment variables and posix compatible flag parsing using the [pflag](https://github.com/spf13/pflag) library.
+
+## Installation
+
+Install by running:
+
+```
+go get -u github.com/Luzifer/rconfig
+```
+
+Run tests by running:
+
+```
+go test -v -race -cover github.com/Luzifer/rconfig
+```
+
+## Usage
+
+As a first step define a struct holding your configuration:
+
+```go
+type config struct {
+ Username string `default:"unknown" flag:"user" description:"Your name"`
+ Details struct {
+ Age int `default:"25" flag:"age" env:"age" description:"Your age"`
+ }
+}
+```
+
+Next create an instance of that struct and let `rconfig` fill that config:
+
+```go
+var cfg config
+func init() {
+ cfg = config{}
+ rconfig.Parse(&cfg)
+}
+```
+
+You're ready to access your configuration:
+
+```go
+func main() {
+ fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
+ cfg.Username,
+ cfg.Details.Age)
+}
+```
+
+## More info
+
+You can see the full reference documentation of the rconfig package [at godoc.org](https://godoc.org/github.com/Luzifer/rconfig), or through go's standard documentation system by running `godoc -http=:6060` and browsing to [http://localhost:6060/pkg/github.com/Luzifer/rconfig](http://localhost:6060/pkg/github.com/Luzifer/rconfig) after installation.
diff --git a/Godeps/_workspace/src/github.com/Luzifer/rconfig/config.go b/Godeps/_workspace/src/github.com/Luzifer/rconfig/config.go
new file mode 100644
index 0000000..4a5aa00
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Luzifer/rconfig/config.go
@@ -0,0 +1,282 @@
+// Package rconfig implements a CLI configuration reader with struct-embedded
+// defaults, environment variables and posix compatible flag parsing using
+// the pflag library.
+package rconfig
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+var fs *pflag.FlagSet
+
+// Parse takes the pointer to a struct filled with variables which should be read
+// from ENV, default or flag. The precedence in this is flag > ENV > default. So
+// if a flag is specified on the CLI it will overwrite the ENV and otherwise ENV
+// overwrites the default specified.
+//
+// For your configuration struct you can use the following struct-tags to control
+// the behavior of rconfig:
+//
+// default: Set a default value
+// env: Read the value from this environment variable
+// flag: Flag to read in format "long,short" (for example "listen,l")
+// description: A help text for Usage output to guide your users
+//
+// The format you need to specify those values you can see in the example to this
+// function.
+//
+func Parse(config interface{}) error {
+ return parse(config, nil)
+}
+
+// Usage prints a basic usage with the corresponding defaults for the flags to
+// os.Stdout. The defaults are derived from the `default` struct-tag and the ENV.
+func Usage() {
+ if fs != nil && fs.Parsed() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fs.PrintDefaults()
+ }
+}
+
+func parse(in interface{}, args []string) error {
+ if args == nil {
+ args = os.Args
+ }
+
+ fs = pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
+ if err := execTags(in, fs); err != nil {
+ return err
+ }
+
+ return fs.Parse(args)
+}
+
+func execTags(in interface{}, fs *pflag.FlagSet) error {
+ if reflect.TypeOf(in).Kind() != reflect.Ptr {
+ return errors.New("Calling parser with non-pointer")
+ }
+
+ if reflect.ValueOf(in).Elem().Kind() != reflect.Struct {
+ return errors.New("Calling parser with pointer to non-struct")
+ }
+
+ st := reflect.ValueOf(in).Elem()
+ for i := 0; i < st.NumField(); i++ {
+ valField := st.Field(i)
+ typeField := st.Type().Field(i)
+
+ if typeField.Tag.Get("default") == "" && typeField.Tag.Get("env") == "" && typeField.Tag.Get("flag") == "" && typeField.Type.Kind() != reflect.Struct {
+ // None of our supported tags is present and it's not a sub-struct
+ continue
+ }
+
+ value := envDefault(typeField.Tag.Get("env"), typeField.Tag.Get("default"))
+ parts := strings.Split(typeField.Tag.Get("flag"), ",")
+
+ switch typeField.Type.Kind() {
+ case reflect.String:
+ if typeField.Tag.Get("flag") != "" {
+ if len(parts) == 1 {
+ fs.StringVar(valField.Addr().Interface().(*string), parts[0], value, typeField.Tag.Get("description"))
+ } else {
+ fs.StringVarP(valField.Addr().Interface().(*string), parts[0], parts[1], value, typeField.Tag.Get("description"))
+ }
+ } else {
+ valField.SetString(value)
+ }
+
+ case reflect.Bool:
+ v := value == "true"
+ if typeField.Tag.Get("flag") != "" {
+ if len(parts) == 1 {
+ fs.BoolVar(valField.Addr().Interface().(*bool), parts[0], v, typeField.Tag.Get("description"))
+ } else {
+ fs.BoolVarP(valField.Addr().Interface().(*bool), parts[0], parts[1], v, typeField.Tag.Get("description"))
+ }
+ } else {
+ valField.SetBool(v)
+ }
+
+ case reflect.Int, reflect.Int8, reflect.Int32, reflect.Int64:
+ vt, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ if value == "" {
+ vt = 0
+ } else {
+ return err
+ }
+ }
+ if typeField.Tag.Get("flag") != "" {
+ registerFlagInt(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
+ } else {
+ valField.SetInt(vt)
+ }
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ vt, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ if value == "" {
+ vt = 0
+ } else {
+ return err
+ }
+ }
+ if typeField.Tag.Get("flag") != "" {
+ registerFlagUint(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
+ } else {
+ valField.SetUint(vt)
+ }
+
+ case reflect.Float32, reflect.Float64:
+ vt, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ if value == "" {
+ vt = 0.0
+ } else {
+ return err
+ }
+ }
+ if typeField.Tag.Get("flag") != "" {
+ registerFlagFloat(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
+ } else {
+ valField.SetFloat(vt)
+ }
+
+ case reflect.Struct:
+ if err := execTags(valField.Addr().Interface(), fs); err != nil {
+ return err
+ }
+
+ case reflect.Slice:
+ switch typeField.Type.Elem().Kind() {
+ case reflect.Int:
+ def := []int{}
+ for _, v := range strings.Split(value, ",") {
+ it, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64)
+ if err != nil {
+ return err
+ }
+ def = append(def, int(it))
+ }
+ if len(parts) == 1 {
+ fs.IntSliceVar(valField.Addr().Interface().(*[]int), parts[0], def, typeField.Tag.Get("description"))
+ } else {
+ fs.IntSliceVarP(valField.Addr().Interface().(*[]int), parts[0], parts[1], def, typeField.Tag.Get("description"))
+ }
+ case reflect.String:
+ del := typeField.Tag.Get("delimiter")
+ if len(del) == 0 {
+ del = ","
+ }
+ def := strings.Split(value, del)
+ if len(parts) == 1 {
+ fs.StringSliceVar(valField.Addr().Interface().(*[]string), parts[0], def, typeField.Tag.Get("description"))
+ } else {
+ fs.StringSliceVarP(valField.Addr().Interface().(*[]string), parts[0], parts[1], def, typeField.Tag.Get("description"))
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func registerFlagFloat(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt float64, desc string) {
+ switch t {
+ case reflect.Float32:
+ if len(parts) == 1 {
+ fs.Float32Var(field.(*float32), parts[0], float32(vt), desc)
+ } else {
+ fs.Float32VarP(field.(*float32), parts[0], parts[1], float32(vt), desc)
+ }
+ case reflect.Float64:
+ if len(parts) == 1 {
+ fs.Float64Var(field.(*float64), parts[0], float64(vt), desc)
+ } else {
+ fs.Float64VarP(field.(*float64), parts[0], parts[1], float64(vt), desc)
+ }
+ }
+}
+
+func registerFlagInt(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt int64, desc string) {
+ switch t {
+ case reflect.Int:
+ if len(parts) == 1 {
+ fs.IntVar(field.(*int), parts[0], int(vt), desc)
+ } else {
+ fs.IntVarP(field.(*int), parts[0], parts[1], int(vt), desc)
+ }
+ case reflect.Int8:
+ if len(parts) == 1 {
+ fs.Int8Var(field.(*int8), parts[0], int8(vt), desc)
+ } else {
+ fs.Int8VarP(field.(*int8), parts[0], parts[1], int8(vt), desc)
+ }
+ case reflect.Int32:
+ if len(parts) == 1 {
+ fs.Int32Var(field.(*int32), parts[0], int32(vt), desc)
+ } else {
+ fs.Int32VarP(field.(*int32), parts[0], parts[1], int32(vt), desc)
+ }
+ case reflect.Int64:
+ if len(parts) == 1 {
+ fs.Int64Var(field.(*int64), parts[0], int64(vt), desc)
+ } else {
+ fs.Int64VarP(field.(*int64), parts[0], parts[1], int64(vt), desc)
+ }
+ }
+}
+
+func registerFlagUint(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt uint64, desc string) {
+ switch t {
+ case reflect.Uint:
+ if len(parts) == 1 {
+ fs.UintVar(field.(*uint), parts[0], uint(vt), desc)
+ } else {
+ fs.UintVarP(field.(*uint), parts[0], parts[1], uint(vt), desc)
+ }
+ case reflect.Uint8:
+ if len(parts) == 1 {
+ fs.Uint8Var(field.(*uint8), parts[0], uint8(vt), desc)
+ } else {
+ fs.Uint8VarP(field.(*uint8), parts[0], parts[1], uint8(vt), desc)
+ }
+ case reflect.Uint16:
+ if len(parts) == 1 {
+ fs.Uint16Var(field.(*uint16), parts[0], uint16(vt), desc)
+ } else {
+ fs.Uint16VarP(field.(*uint16), parts[0], parts[1], uint16(vt), desc)
+ }
+ case reflect.Uint32:
+ if len(parts) == 1 {
+ fs.Uint32Var(field.(*uint32), parts[0], uint32(vt), desc)
+ } else {
+ fs.Uint32VarP(field.(*uint32), parts[0], parts[1], uint32(vt), desc)
+ }
+ case reflect.Uint64:
+ if len(parts) == 1 {
+ fs.Uint64Var(field.(*uint64), parts[0], uint64(vt), desc)
+ } else {
+ fs.Uint64VarP(field.(*uint64), parts[0], parts[1], uint64(vt), desc)
+ }
+ }
+}
+
+func envDefault(env, def string) string {
+ value := def
+
+ if env != "" {
+ if e := os.Getenv(env); e != "" {
+ value = e
+ }
+ }
+
+ return value
+}
diff --git a/Godeps/_workspace/src/github.com/Luzifer/rconfig/config_test.go b/Godeps/_workspace/src/github.com/Luzifer/rconfig/config_test.go
new file mode 100644
index 0000000..84830a8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Luzifer/rconfig/config_test.go
@@ -0,0 +1,274 @@
+package rconfig
+
+import (
+ "os"
+ "testing"
+)
+
+func TestGeneralMechanics(t *testing.T) {
+ cfg := struct {
+ Test string `default:"foo" env:"shell" flag:"shell" description:"Test"`
+ Test2 string `default:"blub" env:"testvar" flag:"testvar,t" description:"Test"`
+ DefaultFlag string `default:"goo"`
+ SadFlag string
+ }{}
+
+ parse(&cfg, []string{
+ "--shell=test23",
+ "-t", "bla",
+ })
+
+ if cfg.Test != "test23" {
+ t.Errorf("Test should be 'test23', is '%s'", cfg.Test)
+ }
+
+ if cfg.Test2 != "bla" {
+ t.Errorf("Test2 should be 'bla', is '%s'", cfg.Test2)
+ }
+
+ if cfg.SadFlag != "" {
+ t.Errorf("SadFlag should be '', is '%s'", cfg.SadFlag)
+ }
+
+ if cfg.DefaultFlag != "goo" {
+ t.Errorf("DefaultFlag should be 'goo', is '%s'", cfg.DefaultFlag)
+ }
+
+ parse(&cfg, []string{})
+
+ if cfg.Test != "foo" {
+ t.Errorf("Test should be 'foo', is '%s'", cfg.Test)
+ }
+
+ os.Setenv("shell", "test546")
+ parse(&cfg, []string{})
+
+ if cfg.Test != "test546" {
+ t.Errorf("Test should be 'test546', is '%s'", cfg.Test)
+ }
+}
+
+func TestBool(t *testing.T) {
+ cfg := struct {
+ Test1 bool `default:"true"`
+ Test2 bool `default:"false" flag:"test2"`
+ Test3 bool `default:"true" flag:"test3,t"`
+ Test4 bool `flag:"test4"`
+ }{}
+
+ parse(&cfg, []string{
+ "--test2",
+ "-t",
+ })
+
+ if !cfg.Test1 {
+ t.Errorf("Test1 should be 'true', is '%+v'", cfg.Test1)
+ }
+ if !cfg.Test2 {
+ t.Errorf("Test1 should be 'true', is '%+v'", cfg.Test2)
+ }
+ if !cfg.Test3 {
+ t.Errorf("Test1 should be 'true', is '%+v'", cfg.Test3)
+ }
+ if cfg.Test4 {
+ t.Errorf("Test1 should be 'false', is '%+v'", cfg.Test3)
+ }
+}
+
+func TestInt(t *testing.T) {
+ cfg := struct {
+ Test int `flag:"int"`
+ TestP int `flag:"intp,i"`
+ Test8 int8 `flag:"int8"`
+ Test8P int8 `flag:"int8p,8"`
+ Test32 int32 `flag:"int32"`
+ Test32P int32 `flag:"int32p,3"`
+ Test64 int64 `flag:"int64"`
+ Test64P int64 `flag:"int64p,6"`
+ TestDef int8 `default:"66"`
+ }{}
+
+ parse(&cfg, []string{
+ "--int=1", "-i", "2",
+ "--int8=3", "-8", "4",
+ "--int32=5", "-3", "6",
+ "--int64=7", "-6", "8",
+ })
+
+ if cfg.Test != 1 || cfg.TestP != 2 || cfg.Test8 != 3 || cfg.Test8P != 4 || cfg.Test32 != 5 || cfg.Test32P != 6 || cfg.Test64 != 7 || cfg.Test64P != 8 {
+ t.Errorf("One of the int tests failed.")
+ }
+
+ if cfg.TestDef != 66 {
+ t.Errorf("TestDef should be '66', is '%d'", cfg.TestDef)
+ }
+}
+
+func TestUint(t *testing.T) {
+ cfg := struct {
+ Test uint `flag:"int"`
+ TestP uint `flag:"intp,i"`
+ Test8 uint8 `flag:"int8"`
+ Test8P uint8 `flag:"int8p,8"`
+ Test16 uint16 `flag:"int16"`
+ Test16P uint16 `flag:"int16p,1"`
+ Test32 uint32 `flag:"int32"`
+ Test32P uint32 `flag:"int32p,3"`
+ Test64 uint64 `flag:"int64"`
+ Test64P uint64 `flag:"int64p,6"`
+ TestDef uint8 `default:"66"`
+ }{}
+
+ parse(&cfg, []string{
+ "--int=1", "-i", "2",
+ "--int8=3", "-8", "4",
+ "--int32=5", "-3", "6",
+ "--int64=7", "-6", "8",
+ "--int16=9", "-1", "10",
+ })
+
+ if cfg.Test != 1 || cfg.TestP != 2 || cfg.Test8 != 3 || cfg.Test8P != 4 || cfg.Test32 != 5 || cfg.Test32P != 6 || cfg.Test64 != 7 || cfg.Test64P != 8 || cfg.Test16 != 9 || cfg.Test16P != 10 {
+ t.Errorf("One of the uint tests failed.")
+ }
+
+ if cfg.TestDef != 66 {
+ t.Errorf("TestDef should be '66', is '%d'", cfg.TestDef)
+ }
+}
+
+func TestFloat(t *testing.T) {
+ cfg := struct {
+ Test32 float32 `flag:"float32"`
+ Test32P float32 `flag:"float32p,3"`
+ Test64 float64 `flag:"float64"`
+ Test64P float64 `flag:"float64p,6"`
+ TestDef float32 `default:"66.256"`
+ }{}
+
+ parse(&cfg, []string{
+ "--float32=5.5", "-3", "6.6",
+ "--float64=7.7", "-6", "8.8",
+ })
+
+ if cfg.Test32 != 5.5 || cfg.Test32P != 6.6 || cfg.Test64 != 7.7 || cfg.Test64P != 8.8 {
+ t.Errorf("One of the int tests failed.")
+ }
+
+ if cfg.TestDef != 66.256 {
+ t.Errorf("TestDef should be '66.256', is '%.3f'", cfg.TestDef)
+ }
+}
+
+func TestSubStruct(t *testing.T) {
+ cfg := struct {
+ Test string `default:"blubb"`
+ Sub struct {
+ Test string `default:"Hallo"`
+ }
+ }{}
+
+ if err := parse(&cfg, []string{}); err != nil {
+ t.Errorf("Test errored: %s", err)
+ }
+
+ if cfg.Test != "blubb" {
+ t.Errorf("Test should be 'blubb', is '%s'", cfg.Test)
+ }
+
+ if cfg.Sub.Test != "Hallo" {
+ t.Errorf("Sub.Test should be 'Hallo', is '%s'", cfg.Sub.Test)
+ }
+}
+
+func TestSlice(t *testing.T) {
+ cfg := struct {
+ Int []int `default:"1,2,3" flag:"int"`
+ String []string `default:"a,b,c" flag:"string"`
+ IntP []int `default:"1,2,3" flag:"intp,i"`
+ StringP []string `default:"a,b,c" flag:"stringp,s"`
+ }{}
+
+ if err := parse(&cfg, []string{
+ "--int=4,5", "-s", "hallo,welt",
+ }); err != nil {
+ t.Errorf("Test errored: %s", err)
+ }
+
+ if len(cfg.Int) != 2 || cfg.Int[0] != 4 || cfg.Int[1] != 5 {
+ t.Errorf("Int should be '4,5', is '%+v'", cfg.Int)
+ }
+
+ if len(cfg.String) != 3 || cfg.String[0] != "a" || cfg.String[1] != "b" {
+ t.Errorf("String should be 'a,b,c', is '%+v'", cfg.String)
+ }
+
+ if len(cfg.StringP) != 2 || cfg.StringP[0] != "hallo" || cfg.StringP[1] != "welt" {
+ t.Errorf("StringP should be 'hallo,welt', is '%+v'", cfg.StringP)
+ }
+}
+
+func TestErrors(t *testing.T) {
+ if err := parse(&struct {
+ A int `default:"a"`
+ }{}, []string{}); err == nil {
+ t.Errorf("Test should have errored")
+ }
+
+ if err := parse(&struct {
+ A float32 `default:"a"`
+ }{}, []string{}); err == nil {
+ t.Errorf("Test should have errored")
+ }
+
+ if err := parse(&struct {
+ A uint `default:"a"`
+ }{}, []string{}); err == nil {
+ t.Errorf("Test should have errored")
+ }
+
+ if err := parse(&struct {
+ B struct {
+ A uint `default:"a"`
+ }
+ }{}, []string{}); err == nil {
+ t.Errorf("Test should have errored")
+ }
+
+ if err := parse(&struct {
+ A []int `default:"a,bn"`
+ }{}, []string{}); err == nil {
+ t.Errorf("Test should have errored")
+ }
+}
+
+func TestOSArgs(t *testing.T) {
+ os.Args = []string{"--a=bar"}
+
+ cfg := struct {
+ A string `default:"a" flag:"a"`
+ }{}
+
+ Parse(&cfg)
+
+ if cfg.A != "bar" {
+ t.Errorf("A should be 'bar', is '%s'", cfg.A)
+ }
+}
+
+func TestNonPointer(t *testing.T) {
+ cfg := struct {
+ A string `default:"a"`
+ }{}
+
+ if err := parse(cfg, []string{}); err == nil {
+ t.Errorf("Test should have errored")
+ }
+}
+
+func TestOtherType(t *testing.T) {
+ cfg := "test"
+
+ if err := parse(&cfg, []string{}); err == nil {
+ t.Errorf("Test should have errored")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Luzifer/rconfig/example_test.go b/Godeps/_workspace/src/github.com/Luzifer/rconfig/example_test.go
new file mode 100644
index 0000000..0a65b2f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Luzifer/rconfig/example_test.go
@@ -0,0 +1,37 @@
+package rconfig
+
+import (
+ "fmt"
+ "os"
+)
+
+func ExampleParse() {
+ // We're building an example configuration with a sub-struct to be filled
+ // by the Parse command.
+ config := struct {
+ Username string `default:"unknown" flag:"user,u" description:"Your name"`
+ Details struct {
+ Age int `default:"25" flag:"age" description:"Your age"`
+ }
+ }{}
+
+ // To have more reliable results we're setting os.Args to a known value.
+ // In real-life use cases you wouldn't do this but parse the original
+ // commandline arguments.
+ os.Args = []string{
+ "example",
+ "--user=Luzifer",
+ }
+
+ Parse(&config)
+
+ fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
+ config.Username,
+ config.Details.Age)
+
+ // You can also show a usage message for your user
+ Usage()
+
+ // Output:
+ // Hello Luzifer, happy birthday for your 25th birthday.
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 0000000..99d8c18
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,105 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Get error details
+// log.Println("Error:", err.Code(), err.Message())
+//
+// // Prints out full error message, including original error if there was one.
+// log.Println("Error:", err.Error())
+//
+// // Get original error
+// if origErr := err.Err(); origErr != nil {
+// // operate on original error.
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type Error interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErr() error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+ if e, ok := origErr.(Error); ok && e != nil {
+ return e
+ }
+ return newBaseError(code, message, origErr)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service such as a connection error.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if reqerr, ok := err.(RequestFailure); ok {
+// log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+// } else {
+// log.Printf("Error:", err.Error()
+// }
+// }
+//
+// Combined with awserr.Error:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Generic AWS Error with Code, Message, and original error (if any)
+// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+// if reqErr, ok := err.(awserr.RequestFailure); ok {
+// // A service error occurred
+// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type RequestFailure interface {
+ Error
+
+ // The status code of the HTTP response.
+ StatusCode() int
+
+ // The request ID returned by the service for a request failure. This will
+ // be empty if no request ID is available such as the request failed due
+ // to a connection error.
+ RequestID() string
+}
+
+// NewRequestFailure returns a new request error wrapper for the given Error
+// provided.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+ return newRequestError(err, statusCode, reqID)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100644
index 0000000..418fc4c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -0,0 +1,135 @@
+package awserr
+
+import "fmt"
+
+// SprintError returns a string of the formatted error code.
+//
+// Both extra and origErr are optional. If they are included their lines
+// will be added, but if they are not included their lines will be ignored.
+func SprintError(code, message, extra string, origErr error) string {
+ msg := fmt.Sprintf("%s: %s", code, message)
+ if extra != "" {
+ msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+ }
+ if origErr != nil {
+ msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+ }
+ return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error. Also
+// for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+ // Classification of error
+ code string
+
+ // Detailed information about error
+ message string
+
+ // Optional original error this error is based off of. Allows building
+ // chained errors.
+ origErr error
+}
+
+// newBaseError returns an error object for the code, message, and err.
+//
+// code is a short no whitespace phrase depicting the classification of
+// the error that is being created.
+//
+// message is the free flow string containing detailed information about the error.
+//
+// origErr is the error object which will be nested under the new error to be returned.
+func newBaseError(code, message string, origErr error) *baseError {
+ return &baseError{
+ code: code,
+ message: message,
+ origErr: origErr,
+ }
+}
+
+// Error returns the string representation of the error.
+//
+// See ErrorWithExtra for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+ return SprintError(b.code, b.message, "", b.origErr)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+ return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+ return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+ return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no error
+// was set.
+func (b baseError) OrigErr() error {
+ return b.origErr
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+ awsError
+ statusCode int
+ requestID string
+}
+
+// newRequestError returns a wrapped error with additional information for request
+// status code, and service requestID.
+//
+// Should be used to wrap all request which involve service requests. Even if
+// the request failed without a service response, but had an HTTP status code
+// that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+ return &requestError{
+ awsError: err,
+ statusCode: statusCode,
+ requestID: requestID,
+ }
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: [%s]",
+ r.statusCode, r.requestID)
+ return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+ return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+ return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+ return r.requestID
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 0000000..99a1156
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,95 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+ if !src.IsValid() {
+ return
+ }
+
+ switch src.Kind() {
+ case reflect.Ptr:
+ if _, ok := src.Interface().(io.Reader); ok {
+ if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+ dst.Elem().Set(src)
+ } else if dst.CanSet() {
+ dst.Set(src)
+ }
+ } else {
+ e := src.Type().Elem()
+ if dst.CanSet() && !src.IsNil() {
+ dst.Set(reflect.New(e))
+ }
+ if src.Elem().IsValid() {
+ // Keep the current root state since the depth hasn't changed
+ rcopy(dst.Elem(), src.Elem(), root)
+ }
+ }
+ case reflect.Struct:
+ if !root {
+ dst.Set(reflect.New(src.Type()).Elem())
+ }
+
+ t := dst.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ srcval := src.FieldByName(name)
+ if srcval.IsValid() {
+ rcopy(dst.FieldByName(name), srcval, false)
+ }
+ }
+ case reflect.Slice:
+ s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ dst.Set(s)
+ for i := 0; i < src.Len(); i++ {
+ rcopy(dst.Index(i), src.Index(i), false)
+ }
+ case reflect.Map:
+ s := reflect.MakeMap(src.Type())
+ dst.Set(s)
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ v2 := reflect.New(v.Type()).Elem()
+ rcopy(v2, v, false)
+ dst.SetMapIndex(k, v2)
+ }
+ default:
+ // Assign the value if possible. If its not assignable, the value would
+ // need to be converted and the impact of that may be unexpected, or is
+ // not compatible with the dst type.
+ if src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go
new file mode 100644
index 0000000..4ae04ac
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go
@@ -0,0 +1,193 @@
+package awsutil_test
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/stretchr/testify/assert"
+)
+
+func ExampleCopy() {
+ type Foo struct {
+ A int
+ B []*string
+ }
+
+ // Create the initial value
+ str1 := "hello"
+ str2 := "bye bye"
+ f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
+
+ // Do the copy
+ var f2 Foo
+ awsutil.Copy(&f2, f1)
+
+ // Print the result
+ fmt.Println(awsutil.StringValue(f2))
+
+ // Output:
+ // {
+ // A: 1,
+ // B: ["hello","bye bye"]
+ // }
+}
+
+func TestCopy(t *testing.T) {
+ type Foo struct {
+ A int
+ B []*string
+ C map[string]*int
+ }
+
+ // Create the initial value
+ str1 := "hello"
+ str2 := "bye bye"
+ int1 := 1
+ int2 := 2
+ f1 := &Foo{
+ A: 1,
+ B: []*string{&str1, &str2},
+ C: map[string]*int{
+ "A": &int1,
+ "B": &int2,
+ },
+ }
+
+ // Do the copy
+ var f2 Foo
+ awsutil.Copy(&f2, f1)
+
+ // Values are equal
+ assert.Equal(t, f2.A, f1.A)
+ assert.Equal(t, f2.B, f1.B)
+ assert.Equal(t, f2.C, f1.C)
+
+ // But pointers are not!
+ str3 := "nothello"
+ int3 := 57
+ f2.A = 100
+ f2.B[0] = &str3
+ f2.C["B"] = &int3
+ assert.NotEqual(t, f2.A, f1.A)
+ assert.NotEqual(t, f2.B, f1.B)
+ assert.NotEqual(t, f2.C, f1.C)
+}
+
+func TestCopyIgnoreNilMembers(t *testing.T) {
+ type Foo struct {
+ A *string
+ }
+
+ f := &Foo{}
+ assert.Nil(t, f.A)
+
+ var f2 Foo
+ awsutil.Copy(&f2, f)
+ assert.Nil(t, f2.A)
+
+ fcopy := awsutil.CopyOf(f)
+ f3 := fcopy.(*Foo)
+ assert.Nil(t, f3.A)
+}
+
+func TestCopyPrimitive(t *testing.T) {
+ str := "hello"
+ var s string
+ awsutil.Copy(&s, &str)
+ assert.Equal(t, "hello", s)
+}
+
+func TestCopyNil(t *testing.T) {
+ var s string
+ awsutil.Copy(&s, nil)
+ assert.Equal(t, "", s)
+}
+
+func TestCopyReader(t *testing.T) {
+ var buf io.Reader = bytes.NewReader([]byte("hello world"))
+ var r io.Reader
+ awsutil.Copy(&r, buf)
+ b, err := ioutil.ReadAll(r)
+ assert.NoError(t, err)
+ assert.Equal(t, []byte("hello world"), b)
+
+ // empty bytes because this is not a deep copy
+ b, err = ioutil.ReadAll(buf)
+ assert.NoError(t, err)
+ assert.Equal(t, []byte(""), b)
+}
+
+func TestCopyDifferentStructs(t *testing.T) {
+ type SrcFoo struct {
+ A int
+ B []*string
+ C map[string]*int
+ SrcUnique string
+ SameNameDiffType int
+ }
+ type DstFoo struct {
+ A int
+ B []*string
+ C map[string]*int
+ DstUnique int
+ SameNameDiffType string
+ }
+
+ // Create the initial value
+ str1 := "hello"
+ str2 := "bye bye"
+ int1 := 1
+ int2 := 2
+ f1 := &SrcFoo{
+ A: 1,
+ B: []*string{&str1, &str2},
+ C: map[string]*int{
+ "A": &int1,
+ "B": &int2,
+ },
+ SrcUnique: "unique",
+ SameNameDiffType: 1,
+ }
+
+ // Do the copy
+ var f2 DstFoo
+ awsutil.Copy(&f2, f1)
+
+ // Values are equal
+ assert.Equal(t, f2.A, f1.A)
+ assert.Equal(t, f2.B, f1.B)
+ assert.Equal(t, f2.C, f1.C)
+ assert.Equal(t, "unique", f1.SrcUnique)
+ assert.Equal(t, 1, f1.SameNameDiffType)
+ assert.Equal(t, 0, f2.DstUnique)
+ assert.Equal(t, "", f2.SameNameDiffType)
+}
+
+func ExampleCopyOf() {
+ type Foo struct {
+ A int
+ B []*string
+ }
+
+ // Create the initial value
+ str1 := "hello"
+ str2 := "bye bye"
+ f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
+
+ // Do the copy
+ v := awsutil.CopyOf(f1)
+ var f2 *Foo = v.(*Foo)
+
+ // Print the result
+ fmt.Println(awsutil.StringValue(f2))
+
+ // Output:
+ // {
+ // A: 1,
+ // B: ["hello","bye bye"]
+ // }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 0000000..7ae01ef
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,175 @@
+package awsutil
+
+import (
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) []reflect.Value {
+ pathparts := strings.Split(path, "||")
+ if len(pathparts) > 1 {
+ for _, pathpart := range pathparts {
+ vals := rValuesAtPath(v, pathpart, create, caseSensitive)
+ if vals != nil && len(vals) > 0 {
+ return vals
+ }
+ }
+ return nil
+ }
+
+ values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+ components := strings.Split(path, ".")
+ for len(values) > 0 && len(components) > 0 {
+ var index *int64
+ var indexStar bool
+ c := strings.TrimSpace(components[0])
+ if c == "" { // no actual component, illegal syntax
+ return nil
+ } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+ // TODO normalize case for user
+ return nil // don't support unexported fields
+ }
+
+ // parse this component
+ if m := indexRe.FindStringSubmatch(c); m != nil {
+ c = m[1]
+ if m[2] == "" {
+ index = nil
+ indexStar = true
+ } else {
+ i, _ := strconv.ParseInt(m[2], 10, 32)
+ index = &i
+ indexStar = false
+ }
+ }
+
+ nextvals := []reflect.Value{}
+ for _, value := range values {
+ // pull component name out of struct member
+ if value.Kind() != reflect.Struct {
+ continue
+ }
+
+ if c == "*" { // pull all members
+ for i := 0; i < value.NumField(); i++ {
+ if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+ nextvals = append(nextvals, f)
+ }
+ }
+ continue
+ }
+
+ value = value.FieldByNameFunc(func(name string) bool {
+ if c == name {
+ return true
+ } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+ return true
+ }
+ return false
+ })
+
+ if create && value.Kind() == reflect.Ptr && value.IsNil() {
+ value.Set(reflect.New(value.Type().Elem()))
+ value = value.Elem()
+ } else {
+ value = reflect.Indirect(value)
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+
+ if indexStar || index != nil {
+ nextvals = []reflect.Value{}
+ for _, value := range values {
+ value := reflect.Indirect(value)
+ if value.Kind() != reflect.Slice {
+ continue
+ }
+
+ if indexStar { // grab all indices
+ for i := 0; i < value.Len(); i++ {
+ idx := reflect.Indirect(value.Index(i))
+ if idx.IsValid() {
+ nextvals = append(nextvals, idx)
+ }
+ }
+ continue
+ }
+
+ // pull out index
+ i := int(*index)
+ if i >= value.Len() { // check out of bounds
+ if create {
+ // TODO resize slice
+ } else {
+ continue
+ }
+ } else if i < 0 { // support negative indexing
+ i = value.Len() + i
+ }
+ value = reflect.Indirect(value.Index(i))
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+ }
+
+ components = components[1:]
+ }
+ return values
+}
+
+// ValuesAtPath returns a list of objects at the lexical path inside of a structure
+func ValuesAtPath(i interface{}, path string) []interface{} {
+ if rvals := rValuesAtPath(i, path, false, true); rvals != nil {
+ vals := make([]interface{}, len(rvals))
+ for i, rval := range rvals {
+ vals[i] = rval.Interface()
+ }
+ return vals
+ }
+ return nil
+}
+
+// ValuesAtAnyPath returns a list of objects at the case-insensitive lexical
+// path inside of a structure
+func ValuesAtAnyPath(i interface{}, path string) []interface{} {
+ if rvals := rValuesAtPath(i, path, false, false); rvals != nil {
+ vals := make([]interface{}, len(rvals))
+ for i, rval := range rvals {
+ vals[i] = rval.Interface()
+ }
+ return vals
+ }
+ return nil
+}
+
+// SetValueAtPath sets an object at the lexical path inside of a structure
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+ if rvals := rValuesAtPath(i, path, true, true); rvals != nil {
+ for _, rval := range rvals {
+ rval.Set(reflect.ValueOf(v))
+ }
+ }
+}
+
+// SetValueAtAnyPath sets an object at the case insensitive lexical path inside
+// of a structure
+func SetValueAtAnyPath(i interface{}, path string, v interface{}) {
+ if rvals := rValuesAtPath(i, path, true, false); rvals != nil {
+ for _, rval := range rvals {
+ rval.Set(reflect.ValueOf(v))
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go
new file mode 100644
index 0000000..ed10aec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go
@@ -0,0 +1,65 @@
+package awsutil_test
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/stretchr/testify/assert"
+)
+
+type Struct struct {
+ A []Struct
+ z []Struct
+ B *Struct
+ D *Struct
+ C string
+}
+
+var data = Struct{
+ A: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}},
+ z: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}},
+ B: &Struct{B: &Struct{C: "terminal"}, D: &Struct{C: "terminal2"}},
+ C: "initial",
+}
+
+func TestValueAtPathSuccess(t *testing.T) {
+ assert.Equal(t, []interface{}{"initial"}, awsutil.ValuesAtPath(data, "C"))
+ assert.Equal(t, []interface{}{"value1"}, awsutil.ValuesAtPath(data, "A[0].C"))
+ assert.Equal(t, []interface{}{"value2"}, awsutil.ValuesAtPath(data, "A[1].C"))
+ assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtPath(data, "A[2].C"))
+ assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtAnyPath(data, "a[2].c"))
+ assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtPath(data, "A[-1].C"))
+ assert.Equal(t, []interface{}{"value1", "value2", "value3"}, awsutil.ValuesAtPath(data, "A[].C"))
+ assert.Equal(t, []interface{}{"terminal"}, awsutil.ValuesAtPath(data, "B . B . C"))
+ assert.Equal(t, []interface{}{"terminal", "terminal2"}, awsutil.ValuesAtPath(data, "B.*.C"))
+ assert.Equal(t, []interface{}{"initial"}, awsutil.ValuesAtPath(data, "A.D.X || C"))
+}
+
+func TestValueAtPathFailure(t *testing.T) {
+ assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, "C.x"))
+ assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, ".x"))
+ assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "X.Y.Z"))
+ assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "A[100].C"))
+ assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "A[3].C"))
+ assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "B.B.C.Z"))
+ assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, "z[-1].C"))
+ assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(nil, "A.B.C"))
+}
+
+func TestSetValueAtPathSuccess(t *testing.T) {
+ var s Struct
+ awsutil.SetValueAtPath(&s, "C", "test1")
+ awsutil.SetValueAtPath(&s, "B.B.C", "test2")
+ awsutil.SetValueAtPath(&s, "B.D.C", "test3")
+ assert.Equal(t, "test1", s.C)
+ assert.Equal(t, "test2", s.B.B.C)
+ assert.Equal(t, "test3", s.B.D.C)
+
+ awsutil.SetValueAtPath(&s, "B.*.C", "test0")
+ assert.Equal(t, "test0", s.B.B.C)
+ assert.Equal(t, "test0", s.B.D.C)
+
+ var s2 Struct
+ awsutil.SetValueAtAnyPath(&s2, "b.b.c", "test0")
+ assert.Equal(t, "test0", s2.B.B.C)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 0000000..09673a1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,103 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+ var buf bytes.Buffer
+ stringValue(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+// stringValue will recursively walk value v to build a textual
+// representation of the value.
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ strtype := v.Type().String()
+ if strtype == "time.Time" {
+ fmt.Fprintf(buf, "%s", v.Interface())
+ break
+ } else if strings.HasPrefix(strtype, "io.") {
+ buf.WriteString("")
+ break
+ }
+
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ stringValue(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ stringValue(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ stringValue(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ case io.ReadSeeker, io.Reader:
+ format = "buffer(%p)"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 0000000..4699070
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,173 @@
+package aws
+
+import (
+ "io"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+// DefaultChainCredentials is a Credentials which will find the first available
+// credentials Value from the list of Providers.
+//
+// This should be used in the default case. Once the type of credentials are
+// known switching to the specific Credentials will be more efficient.
+var DefaultChainCredentials = credentials.NewChainCredentials(
+ []credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ &credentials.EC2RoleProvider{ExpiryWindow: 5 * time.Minute},
+ })
+
+// The default number of retries for a service. The value of -1 indicates that
+// the service specific retry default will be used.
+const DefaultRetries = -1
+
+// DefaultConfig is the default all service configuration will be based off of.
+var DefaultConfig = &Config{
+ Credentials: DefaultChainCredentials,
+ Endpoint: "",
+ Region: os.Getenv("AWS_REGION"),
+ DisableSSL: false,
+ ManualSend: false,
+ HTTPClient: http.DefaultClient,
+ LogHTTPBody: false,
+ LogLevel: 0,
+ Logger: os.Stdout,
+ MaxRetries: DefaultRetries,
+ DisableParamValidation: false,
+ DisableComputeChecksums: false,
+ S3ForcePathStyle: false,
+}
+
+// A Config provides service configuration
+type Config struct {
+ Credentials *credentials.Credentials
+ Endpoint string
+ Region string
+ DisableSSL bool
+ ManualSend bool
+ HTTPClient *http.Client
+ LogHTTPBody bool
+ LogLevel uint
+ Logger io.Writer
+ MaxRetries int
+ DisableParamValidation bool
+ DisableComputeChecksums bool
+ S3ForcePathStyle bool
+}
+
+// Copy will return a shallow copy of the Config object.
+func (c Config) Copy() Config {
+ dst := Config{}
+ dst.Credentials = c.Credentials
+ dst.Endpoint = c.Endpoint
+ dst.Region = c.Region
+ dst.DisableSSL = c.DisableSSL
+ dst.ManualSend = c.ManualSend
+ dst.HTTPClient = c.HTTPClient
+ dst.LogHTTPBody = c.LogHTTPBody
+ dst.LogLevel = c.LogLevel
+ dst.Logger = c.Logger
+ dst.MaxRetries = c.MaxRetries
+ dst.DisableParamValidation = c.DisableParamValidation
+ dst.DisableComputeChecksums = c.DisableComputeChecksums
+ dst.S3ForcePathStyle = c.S3ForcePathStyle
+
+ return dst
+}
+
+// Merge merges the newcfg attribute values into this Config. Each attribute
+// will be merged into this config if the newcfg attribute's value is non-zero.
+// Due to this, newcfg attributes with zero values cannot be merged in. For
+// example bool attributes cannot be cleared using Merge, and must be explicitly
+// set on the Config structure.
+func (c Config) Merge(newcfg *Config) *Config {
+ if newcfg == nil {
+ return &c
+ }
+
+ cfg := Config{}
+
+ if newcfg.Credentials != nil {
+ cfg.Credentials = newcfg.Credentials
+ } else {
+ cfg.Credentials = c.Credentials
+ }
+
+ if newcfg.Endpoint != "" {
+ cfg.Endpoint = newcfg.Endpoint
+ } else {
+ cfg.Endpoint = c.Endpoint
+ }
+
+ if newcfg.Region != "" {
+ cfg.Region = newcfg.Region
+ } else {
+ cfg.Region = c.Region
+ }
+
+ if newcfg.DisableSSL {
+ cfg.DisableSSL = newcfg.DisableSSL
+ } else {
+ cfg.DisableSSL = c.DisableSSL
+ }
+
+ if newcfg.ManualSend {
+ cfg.ManualSend = newcfg.ManualSend
+ } else {
+ cfg.ManualSend = c.ManualSend
+ }
+
+ if newcfg.HTTPClient != nil {
+ cfg.HTTPClient = newcfg.HTTPClient
+ } else {
+ cfg.HTTPClient = c.HTTPClient
+ }
+
+ if newcfg.LogHTTPBody {
+ cfg.LogHTTPBody = newcfg.LogHTTPBody
+ } else {
+ cfg.LogHTTPBody = c.LogHTTPBody
+ }
+
+ if newcfg.LogLevel != 0 {
+ cfg.LogLevel = newcfg.LogLevel
+ } else {
+ cfg.LogLevel = c.LogLevel
+ }
+
+ if newcfg.Logger != nil {
+ cfg.Logger = newcfg.Logger
+ } else {
+ cfg.Logger = c.Logger
+ }
+
+ if newcfg.MaxRetries != DefaultRetries {
+ cfg.MaxRetries = newcfg.MaxRetries
+ } else {
+ cfg.MaxRetries = c.MaxRetries
+ }
+
+ if newcfg.DisableParamValidation {
+ cfg.DisableParamValidation = newcfg.DisableParamValidation
+ } else {
+ cfg.DisableParamValidation = c.DisableParamValidation
+ }
+
+ if newcfg.DisableComputeChecksums {
+ cfg.DisableComputeChecksums = newcfg.DisableComputeChecksums
+ } else {
+ cfg.DisableComputeChecksums = c.DisableComputeChecksums
+ }
+
+ if newcfg.S3ForcePathStyle {
+ cfg.S3ForcePathStyle = newcfg.S3ForcePathStyle
+ } else {
+ cfg.S3ForcePathStyle = c.S3ForcePathStyle
+ }
+
+ return &cfg
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go
new file mode 100644
index 0000000..fc5a77b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go
@@ -0,0 +1,92 @@
+package aws
+
+import (
+ "net/http"
+ "os"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+var testCredentials = credentials.NewChainCredentials([]credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{
+ Filename: "TestFilename",
+ Profile: "TestProfile"},
+ &credentials.EC2RoleProvider{ExpiryWindow: 5 * time.Minute},
+})
+
+var copyTestConfig = Config{
+ Credentials: testCredentials,
+ Endpoint: "CopyTestEndpoint",
+ Region: "COPY_TEST_AWS_REGION",
+ DisableSSL: true,
+ ManualSend: true,
+ HTTPClient: http.DefaultClient,
+ LogHTTPBody: true,
+ LogLevel: 2,
+ Logger: os.Stdout,
+ MaxRetries: DefaultRetries,
+ DisableParamValidation: true,
+ DisableComputeChecksums: true,
+ S3ForcePathStyle: true,
+}
+
+func TestCopy(t *testing.T) {
+ want := copyTestConfig
+ got := copyTestConfig.Copy()
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("Copy() = %+v", got)
+ t.Errorf(" want %+v", want)
+ }
+}
+
+func TestCopyReturnsNewInstance(t *testing.T) {
+ want := copyTestConfig
+ got := copyTestConfig.Copy()
+ if &got == &want {
+ t.Errorf("Copy() = %p; want different instance as source %p", &got, &want)
+ }
+}
+
+var mergeTestZeroValueConfig = Config{MaxRetries: DefaultRetries}
+
+var mergeTestConfig = Config{
+ Credentials: testCredentials,
+ Endpoint: "MergeTestEndpoint",
+ Region: "MERGE_TEST_AWS_REGION",
+ DisableSSL: true,
+ ManualSend: true,
+ HTTPClient: http.DefaultClient,
+ LogHTTPBody: true,
+ LogLevel: 2,
+ Logger: os.Stdout,
+ MaxRetries: 10,
+ DisableParamValidation: true,
+ DisableComputeChecksums: true,
+ S3ForcePathStyle: true,
+}
+
+var mergeTests = []struct {
+ cfg *Config
+ in *Config
+ want *Config
+}{
+ {&Config{}, nil, &Config{}},
+ {&Config{}, &mergeTestZeroValueConfig, &Config{}},
+ {&Config{}, &mergeTestConfig, &mergeTestConfig},
+}
+
+func TestMerge(t *testing.T) {
+ for _, tt := range mergeTests {
+ got := tt.cfg.Merge(tt.in)
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("Config %+v", tt.cfg)
+ t.Errorf(" Merge(%+v)", tt.in)
+ t.Errorf(" got %+v", got)
+ t.Errorf(" want %+v", tt.want)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 0000000..73ff1b9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,81 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+ // ErrNoValidProvidersFoundInChain Is returned when there are no valid
+ // providers in the ChainProvider.
+ ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", "no valid providers in chain", nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+// creds := NewChainCredentials(
+// []Provider{
+// &EnvProvider{},
+// &EC2RoleProvider{},
+// })
+// creds.Retrieve()
+//
+type ChainProvider struct {
+ Providers []Provider
+ curr Provider
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return NewCredentials(&ChainProvider{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value or error if no provider returned
+// without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+ for _, p := range c.Providers {
+ if creds, err := p.Retrieve(); err == nil {
+ c.curr = p
+ return creds, nil
+ }
+ }
+ c.curr = nil
+
+ // TODO better error reporting. maybe report error for each failed retrieve?
+
+ return Value{}, ErrNoValidProvidersFoundInChain
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go
new file mode 100644
index 0000000..4fba22f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go
@@ -0,0 +1,73 @@
+package credentials
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestChainProviderGet(t *testing.T) {
+ p := &ChainProvider{
+ Providers: []Provider{
+ &stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
+ &stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
+ &stubProvider{
+ creds: Value{
+ AccessKeyID: "AKID",
+ SecretAccessKey: "SECRET",
+ SessionToken: "",
+ },
+ },
+ },
+ }
+
+ creds, err := p.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+ assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
+ assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
+}
+
+func TestChainProviderIsExpired(t *testing.T) {
+ stubProvider := &stubProvider{expired: true}
+ p := &ChainProvider{
+ Providers: []Provider{
+ stubProvider,
+ },
+ }
+
+ assert.True(t, p.IsExpired(), "Expect expired to be true before any Retrieve")
+ _, err := p.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+ assert.False(t, p.IsExpired(), "Expect not expired after retrieve")
+
+ stubProvider.expired = true
+ assert.True(t, p.IsExpired(), "Expect return of expired provider")
+
+ _, err = p.Retrieve()
+ assert.False(t, p.IsExpired(), "Expect not expired after retrieve")
+}
+
+func TestChainProviderWithNoProvider(t *testing.T) {
+ p := &ChainProvider{
+ Providers: []Provider{},
+ }
+
+ assert.True(t, p.IsExpired(), "Expect expired with no providers")
+ _, err := p.Retrieve()
+ assert.Equal(t, ErrNoValidProvidersFoundInChain, err, "Expect no providers error returned")
+}
+
+func TestChainProviderWithNoValidProvider(t *testing.T) {
+ p := &ChainProvider{
+ Providers: []Provider{
+ &stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
+ &stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
+ },
+ }
+
+ assert.True(t, p.IsExpired(), "Expect expired with no providers")
+ _, err := p.Retrieve()
+ assert.Equal(t, ErrNoValidProvidersFoundInChain, err, "Expect no providers error returned")
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 0000000..3d6ac4d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,219 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials is the primary method of getting access to and managing
+// credentials Values. Using dependency injection retrieval of the credential
+// values is handled by a object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true. At which
+// point Credentials will call Provider's Retrieve() to get new credential Value.
+//
+// The Provider is responsible for determining when credentials Value have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := NewEnvCredentials()
+//
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := NewCredentials(&EC2RoleProvider{})
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
+
+import (
+ "sync"
+ "time"
+)
+
+// Create an empty Credential object that can be used as dummy placeholder
+// credentials for requests that do not need signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
+// // Access public S3 buckets.
+//
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what to
+// be expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+ // Refresh returns nil if it successfully retrieved the value.
+// Error is returned if the value was not obtainable, or empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// A Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+// type EC2RoleProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time when to expire on
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set. Available for testing
+ // to be able to mock out the current time.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ e.expiration = expiration
+ if window > 0 {
+ e.expiration = e.expiration.Add(-window)
+ }
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ if e.CurrentTime == nil {
+ e.CurrentTime = time.Now
+ }
+ return e.expiration.Before(e.CurrentTime())
+}
+
+// A Credentials provides synchronous safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ creds Value
+ forceRefresh bool
+ m sync.Mutex
+
+ provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go
new file mode 100644
index 0000000..99c2b47
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go
@@ -0,0 +1,62 @@
+package credentials
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/stretchr/testify/assert"
+)
+
// stubProvider is a test double implementing Provider. It returns the
// configured creds/err pair from Retrieve and reports the configured
// expired flag from IsExpired.
type stubProvider struct {
	creds   Value
	expired bool
	err     error
}

// Retrieve returns the stubbed credentials and error, and clears the
// expired flag to mimic a successful refresh.
func (s *stubProvider) Retrieve() (Value, error) {
	s.expired = false
	return s.creds, s.err
}

// IsExpired reports the stubbed expired flag.
func (s *stubProvider) IsExpired() bool {
	return s.expired
}
+
+func TestCredentialsGet(t *testing.T) {
+ c := NewCredentials(&stubProvider{
+ creds: Value{
+ AccessKeyID: "AKID",
+ SecretAccessKey: "SECRET",
+ SessionToken: "",
+ },
+ expired: true,
+ })
+
+ creds, err := c.Get()
+ assert.Nil(t, err, "Expected no error")
+ assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
+ assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
+}
+
// TestCredentialsGetWithError verifies that a provider's Retrieve error is
// surfaced unchanged by Credentials.Get.
func TestCredentialsGetWithError(t *testing.T) {
	c := NewCredentials(&stubProvider{err: awserr.New("provider error", "", nil), expired: true})

	_, err := c.Get()
	assert.Equal(t, "provider error", err.(awserr.Error).Code(), "Expected provider error")
}
+
// TestCredentialsExpire exercises the interaction between Expire(), the
// provider's own expiry, and the internal forceRefresh flag. This is a
// white-box test: it pokes c.forceRefresh directly.
func TestCredentialsExpire(t *testing.T) {
	stub := &stubProvider{}
	c := NewCredentials(stub)

	stub.expired = false
	// NewCredentials starts with forceRefresh set, so still expired here.
	assert.True(t, c.IsExpired(), "Expected to start out expired")
	c.Expire()
	assert.True(t, c.IsExpired(), "Expected to be expired")

	c.forceRefresh = false
	assert.False(t, c.IsExpired(), "Expected not to be expired")

	stub.expired = true
	assert.True(t, c.IsExpired(), "Expected to be expired")
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider.go
new file mode 100644
index 0000000..7691b62
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider.go
@@ -0,0 +1,163 @@
+package credentials
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const metadataCredentialsEndpoint = "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
+
// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
//
// Example how to configure the EC2RoleProvider with custom http Client, Endpoint
// or ExpiryWindow
//
//     p := &credentials.EC2RoleProvider{
//         // Pass in a custom timeout to be used when requesting
//         // IAM EC2 Role credentials.
//         Client: &http.Client{
//             Timeout: 10 * time.Second,
//         },
//         // Use default EC2 Role metadata endpoint, Alternate endpoints can be
//         // specified setting Endpoint to something else.
//         Endpoint: "",
//         // Do not use early expiry of credentials. If a non zero value is
//         // specified the credentials will be expired early
//         ExpiryWindow: 0,
//     }
//
type EC2RoleProvider struct {
	// Embedded Expiry tracks when the retrieved credentials expire
	// (set via SetExpiration in Retrieve).
	Expiry

	// Endpoint must be a fully qualified URL. Defaults to the EC2 metadata
	// credentials endpoint when empty.
	Endpoint string

	// HTTP client to use when connecting to EC2 service. Defaults to
	// http.DefaultClient when nil.
	Client *http.Client

	// ExpiryWindow will allow the credentials to trigger refreshing prior to
	// the credentials actually expiring. This is beneficial so race conditions
	// with expiring credentials do not cause request to fail unexpectedly
	// due to ExpiredTokenException exceptions.
	//
	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
	// 10 seconds before the credentials are actually expired.
	//
	// If ExpiryWindow is 0 or less it will be ignored.
	ExpiryWindow time.Duration
}
+
+// NewEC2RoleCredentials returns a pointer to a new Credentials object
+// wrapping the EC2RoleProvider.
+//
+// Takes a custom http.Client which can be configured for custom handling of
+// things such as timeout.
+//
+// Endpoint is the URL that the EC2RoleProvider will connect to when retrieving
+// role and credentials.
+//
+// Window is the expiry window that will be subtracted from the expiry returned
+// by the role credential request. This is done so that the credentials will
+// expire sooner than their actual lifespan.
+func NewEC2RoleCredentials(client *http.Client, endpoint string, window time.Duration) *Credentials {
+ return NewCredentials(&EC2RoleProvider{
+ Endpoint: endpoint,
+ Client: client,
+ ExpiryWindow: window,
+ })
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// Error will be returned if the request fails, or unable to extract
+// the desired credentials.
+func (m *EC2RoleProvider) Retrieve() (Value, error) {
+ if m.Client == nil {
+ m.Client = http.DefaultClient
+ }
+ if m.Endpoint == "" {
+ m.Endpoint = metadataCredentialsEndpoint
+ }
+
+ credsList, err := requestCredList(m.Client, m.Endpoint)
+ if err != nil {
+ return Value{}, err
+ }
+
+ if len(credsList) == 0 {
+ return Value{}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+ }
+ credsName := credsList[0]
+
+ roleCreds, err := requestCred(m.Client, m.Endpoint, credsName)
+ if err != nil {
+ return Value{}, err
+ }
+
+ m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+ return Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ }, nil
+}
+
// An ec2RoleCredRespBody provides the shape for deserializing credential
// request responses from the EC2 metadata service. Field names match the
// JSON keys of the metadata credential document.
type ec2RoleCredRespBody struct {
	Expiration      time.Time
	AccessKeyID     string
	SecretAccessKey string
	Token           string
}
+
+// requestCredList requests a list of credentials from the EC2 service.
+// If there are no credentials, or there is an error making or receiving the request
+func requestCredList(client *http.Client, endpoint string) ([]string, error) {
+ resp, err := client.Get(endpoint)
+ if err != nil {
+ return nil, awserr.New("ListEC2Role", "failed to list EC2 Roles", err)
+ }
+ defer resp.Body.Close()
+
+ credsList := []string{}
+ s := bufio.NewScanner(resp.Body)
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, awserr.New("ReadEC2Role", "failed to read list of EC2 Roles", err)
+ }
+
+ return credsList, nil
+}
+
+// requestCred requests the credentials for a specific credentials from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response
+// and error will be returned.
+func requestCred(client *http.Client, endpoint, credsName string) (*ec2RoleCredRespBody, error) {
+ resp, err := client.Get(endpoint + credsName)
+ if err != nil {
+ return nil, awserr.New("GetEC2RoleCredentials",
+ fmt.Sprintf("failed to get %s EC2 Role credentials", credsName),
+ err)
+ }
+ defer resp.Body.Close()
+
+ respCreds := &ec2RoleCredRespBody{}
+ if err := json.NewDecoder(resp.Body).Decode(respCreds); err != nil {
+ return nil, awserr.New("DecodeEC2RoleCredentials",
+ fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName),
+ err)
+ }
+
+ return respCreds, nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider_test.go
new file mode 100644
index 0000000..da1549a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider_test.go
@@ -0,0 +1,108 @@
+package credentials
+
+import (
+ "fmt"
+ "github.com/stretchr/testify/assert"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+)
+
+func initTestServer(expireOn string) *httptest.Server {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.RequestURI == "/" {
+ fmt.Fprintln(w, "/creds")
+ } else {
+ fmt.Fprintf(w, `{
+ "AccessKeyId" : "accessKey",
+ "SecretAccessKey" : "secret",
+ "Token" : "token",
+ "Expiration" : "%s"
+}`, expireOn)
+ }
+ }))
+
+ return server
+}
+
// TestEC2RoleProvider verifies Retrieve parses the stub metadata server's
// credential document into a Value.
func TestEC2RoleProvider(t *testing.T) {
	server := initTestServer("2014-12-16T01:51:37Z")
	defer server.Close()

	p := &EC2RoleProvider{Client: http.DefaultClient, Endpoint: server.URL}

	creds, err := p.Retrieve()
	assert.Nil(t, err, "Expect no error")

	assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
	assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
	assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
}
+
// TestEC2RoleProviderIsExpired checks expiry tracking against the document's
// Expiration timestamp by stubbing the provider's clock (CurrentTime).
func TestEC2RoleProviderIsExpired(t *testing.T) {
	server := initTestServer("2014-12-16T01:51:37Z")
	defer server.Close()

	p := &EC2RoleProvider{Client: http.DefaultClient, Endpoint: server.URL}
	// Freeze "now" a few hours before the stubbed expiration.
	p.CurrentTime = func() time.Time {
		return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC)
	}

	assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")

	_, err := p.Retrieve()
	assert.Nil(t, err, "Expect no error")

	assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")

	// Jump far past the expiration; the creds must report expired again.
	p.CurrentTime = func() time.Time {
		return time.Date(3014, 12, 15, 21, 26, 0, 0, time.UTC)
	}

	assert.True(t, p.IsExpired(), "Expect creds to be expired.")
}
+
// TestEC2RoleProviderExpiryWindowIsExpired verifies that ExpiryWindow shifts
// the effective expiry earlier than the document's Expiration timestamp.
func TestEC2RoleProviderExpiryWindowIsExpired(t *testing.T) {
	server := initTestServer("2014-12-16T01:51:37Z")
	defer server.Close()

	p := &EC2RoleProvider{Client: http.DefaultClient, Endpoint: server.URL, ExpiryWindow: time.Hour * 1}
	p.CurrentTime = func() time.Time {
		return time.Date(2014, 12, 15, 0, 51, 37, 0, time.UTC)
	}

	assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")

	_, err := p.Retrieve()
	assert.Nil(t, err, "Expect no error")

	assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")

	// 56 minutes before the stubbed expiration: inside the 1h window,
	// so the creds must already report expired.
	p.CurrentTime = func() time.Time {
		return time.Date(2014, 12, 16, 0, 55, 37, 0, time.UTC)
	}

	assert.True(t, p.IsExpired(), "Expect creds to be expired.")
}
+
// BenchmarkEC2RoleProvider measures parallel Retrieve calls against the stub
// metadata server.
func BenchmarkEC2RoleProvider(b *testing.B) {
	server := initTestServer("2014-12-16T01:51:37Z")
	defer server.Close()

	p := &EC2RoleProvider{Client: http.DefaultClient, Endpoint: server.URL}
	// Warm-up call so setup errors fail fast and defaults are applied
	// before the timed region.
	_, err := p.Retrieve()
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err := p.Retrieve()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 0000000..3e556be
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,67 @@
+package credentials
+
+import (
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
var (
	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
	// found in the process's environment (neither AWS_ACCESS_KEY_ID nor
	// AWS_ACCESS_KEY is set).
	ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
	// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
	// can't be found in the process's environment (neither
	// AWS_SECRET_ACCESS_KEY nor AWS_SECRET_KEY is set).
	ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)
+
// An EnvProvider retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
// - Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
// - Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
type EnvProvider struct {
	// retrieved is true once Retrieve has successfully read credentials
	// from the environment; IsExpired reports its negation.
	retrieved bool
}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+ return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ if id == "" {
+ return Value{}, ErrAccessKeyIDNotFound
+ }
+
+ if secret == "" {
+ return Value{}, ErrSecretAccessKeyNotFound
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ }, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+ return !e.retrieved
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go
new file mode 100644
index 0000000..53f6ce2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go
@@ -0,0 +1,70 @@
+package credentials
+
+import (
+ "github.com/stretchr/testify/assert"
+ "os"
+ "testing"
+)
+
// TestEnvProviderRetrieve verifies the standard AWS_* environment variables
// are read into the returned Value.
func TestEnvProviderRetrieve(t *testing.T) {
	os.Clearenv()
	os.Setenv("AWS_ACCESS_KEY_ID", "access")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
	os.Setenv("AWS_SESSION_TOKEN", "token")

	e := EnvProvider{}
	creds, err := e.Retrieve()
	assert.Nil(t, err, "Expect no error")

	assert.Equal(t, "access", creds.AccessKeyID, "Expect access key ID to match")
	assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
	assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
}
+
// TestEnvProviderIsExpired verifies the provider is expired until Retrieve
// has succeeded.
func TestEnvProviderIsExpired(t *testing.T) {
	os.Clearenv()
	os.Setenv("AWS_ACCESS_KEY_ID", "access")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
	os.Setenv("AWS_SESSION_TOKEN", "token")

	e := EnvProvider{}

	assert.True(t, e.IsExpired(), "Expect creds to be expired before retrieve.")

	_, err := e.Retrieve()
	assert.Nil(t, err, "Expect no error")

	assert.False(t, e.IsExpired(), "Expect creds to not be expired after retrieve.")
}
+
// TestEnvProviderNoAccessKeyID verifies a missing access key ID yields
// ErrAccessKeyIDNotFound.
func TestEnvProviderNoAccessKeyID(t *testing.T) {
	os.Clearenv()
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")

	e := EnvProvider{}
	creds, err := e.Retrieve()
	assert.Equal(t, ErrAccessKeyIDNotFound, err, "ErrAccessKeyIDNotFound expected, but was %#v error: %#v", creds, err)
}
+
// TestEnvProviderNoSecretAccessKey verifies a missing secret access key
// yields ErrSecretAccessKeyNotFound.
func TestEnvProviderNoSecretAccessKey(t *testing.T) {
	os.Clearenv()
	os.Setenv("AWS_ACCESS_KEY_ID", "access")

	e := EnvProvider{}
	creds, err := e.Retrieve()
	assert.Equal(t, ErrSecretAccessKeyNotFound, err, "ErrSecretAccessKeyNotFound expected, but was %#v error: %#v", creds, err)
}
+
// TestEnvProviderAlternateNames verifies the legacy AWS_ACCESS_KEY and
// AWS_SECRET_KEY variable names are honored as fallbacks.
func TestEnvProviderAlternateNames(t *testing.T) {
	os.Clearenv()
	os.Setenv("AWS_ACCESS_KEY", "access")
	os.Setenv("AWS_SECRET_KEY", "secret")

	e := EnvProvider{}
	creds, err := e.Retrieve()
	assert.Nil(t, err, "Expect no error")

	assert.Equal(t, "access", creds.AccessKeyID, "Expected access key ID")
	assert.Equal(t, "secret", creds.SecretAccessKey, "Expected secret access key")
	assert.Empty(t, creds.SessionToken, "Expected no token")
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
new file mode 100644
index 0000000..aa2dc50
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
@@ -0,0 +1,8 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 0000000..7367f73
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,133 @@
+package credentials
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/vaughan0/go-ini"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
var (
	// ErrSharedCredentialsHomeNotFound is emitted when the user's home
	// directory cannot be determined (neither HOME nor USERPROFILE is set).
	ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
)
+
// A SharedCredentialsProvider retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type SharedCredentialsProvider struct {
	// Path to the shared credentials file. If empty will default to current user's
	// home directory ($HOME/.aws/credentials, or %USERPROFILE% on Windows).
	Filename string

	// AWS Profile to extract credentials from the shared credentials file. If empty
	// will default to environment variable "AWS_PROFILE" or "default" if
	// environment variable is also not set.
	Profile string

	// retrieved states if the credentials have been successfully retrieved.
	retrieved bool
}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+ return NewCredentials(&SharedCredentialsProvider{
+ Filename: filename,
+ Profile: profile,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// users home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+ p.retrieved = false
+
+ filename, err := p.filename()
+ if err != nil {
+ return Value{}, err
+ }
+
+ creds, err := loadProfile(filename, p.profile())
+ if err != nil {
+ return Value{}, err
+ }
+
+ p.retrieved = true
+ return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+ return !p.retrieved
+}
+
// loadProfile loads from the file pointed to by shared credentials filename for profile.
// The credentials retrieved from the profile will be returned or error. Error will be
// returned if it fails to read from the file, or the data is invalid.
func loadProfile(filename, profile string) (Value, error) {
	config, err := ini.LoadFile(filename)
	if err != nil {
		return Value{}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
	}
	iniProfile := config.Section(profile)

	// Both key fields are mandatory in the profile section.
	id, ok := iniProfile["aws_access_key_id"]
	if !ok {
		return Value{}, awserr.New("SharedCredsAccessKey",
			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
			nil)
	}

	secret, ok := iniProfile["aws_secret_access_key"]
	if !ok {
		return Value{}, awserr.New("SharedCredsSecret",
			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
			nil)
	}

	// The session token is optional; a missing key yields an empty string.
	token := iniProfile["aws_session_token"]

	return Value{
		AccessKeyID:     id,
		SecretAccessKey: secret,
		SessionToken:    token,
	}, nil
}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+ if p.Filename == "" {
+ homeDir := os.Getenv("HOME") // *nix
+ if homeDir == "" { // Windows
+ homeDir = os.Getenv("USERPROFILE")
+ }
+ if homeDir == "" {
+ return "", ErrSharedCredentialsHomeNotFound
+ }
+
+ p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+ }
+
+ return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+ if p.Profile == "" {
+ p.Profile = os.Getenv("AWS_PROFILE")
+ }
+ if p.Profile == "" {
+ p.Profile = "default"
+ }
+
+ return p.Profile
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go
new file mode 100644
index 0000000..3621d56
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go
@@ -0,0 +1,77 @@
+package credentials
+
+import (
+ "github.com/stretchr/testify/assert"
+ "os"
+ "testing"
+)
+
// TestSharedCredentialsProvider verifies credentials are read from the
// [default] section of the bundled example.ini fixture.
func TestSharedCredentialsProvider(t *testing.T) {
	os.Clearenv()

	p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
	creds, err := p.Retrieve()
	assert.Nil(t, err, "Expect no error")

	assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
	assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
	assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
}
+
// TestSharedCredentialsProviderIsExpired verifies the provider is expired
// until Retrieve has succeeded.
func TestSharedCredentialsProviderIsExpired(t *testing.T) {
	os.Clearenv()

	p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}

	assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve")

	_, err := p.Retrieve()
	assert.Nil(t, err, "Expect no error")

	assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve")
}
+
// TestSharedCredentialsProviderWithAWS_PROFILE verifies the AWS_PROFILE
// environment variable selects the profile when Profile is empty.
func TestSharedCredentialsProviderWithAWS_PROFILE(t *testing.T) {
	os.Clearenv()
	os.Setenv("AWS_PROFILE", "no_token")

	p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
	creds, err := p.Retrieve()
	assert.Nil(t, err, "Expect no error")

	assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
	assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
	assert.Empty(t, creds.SessionToken, "Expect no token")
}
+
// TestSharedCredentialsProviderWithoutTokenFromProfile verifies a profile
// lacking aws_session_token yields an empty SessionToken.
func TestSharedCredentialsProviderWithoutTokenFromProfile(t *testing.T) {
	os.Clearenv()

	p := SharedCredentialsProvider{Filename: "example.ini", Profile: "no_token"}
	creds, err := p.Retrieve()
	assert.Nil(t, err, "Expect no error")

	assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
	assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
	assert.Empty(t, creds.SessionToken, "Expect no token")
}
+
// BenchmarkSharedCredentialsProvider measures parallel Retrieve calls against
// the example.ini fixture.
func BenchmarkSharedCredentialsProvider(b *testing.B) {
	os.Clearenv()

	p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
	// Warm-up call so setup errors fail fast before the timed region.
	_, err := p.Retrieve()
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err := p.Retrieve()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 0000000..a114713
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,42 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
var (
	// ErrStaticCredentialsEmpty is emitted when static credentials are
	// missing the access key ID or secret access key.
	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
)
+
// A StaticProvider is a set of credentials which are set programmatically,
// and will never expire.
type StaticProvider struct {
	// Embedded Value holds the fixed credentials returned by Retrieve.
	Value
}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+ return NewCredentials(&StaticProvider{Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ }})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ return Value{}, ErrStaticCredentialsEmpty
+ }
+
+ return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expired.
+func (s *StaticProvider) IsExpired() bool {
+ return false
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go
new file mode 100644
index 0000000..ea01236
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go
@@ -0,0 +1,34 @@
+package credentials
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
// TestStaticProviderGet verifies Retrieve returns the fixed Value unchanged.
func TestStaticProviderGet(t *testing.T) {
	s := StaticProvider{
		Value: Value{
			AccessKeyID:     "AKID",
			SecretAccessKey: "SECRET",
			SessionToken:    "",
		},
	}

	creds, err := s.Retrieve()
	assert.Nil(t, err, "Expect no error")
	assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
	assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
	assert.Empty(t, creds.SessionToken, "Expect no session token")
}
+
// TestStaticProviderIsExpired verifies static credentials never report
// themselves expired.
func TestStaticProviderIsExpired(t *testing.T) {
	s := StaticProvider{
		Value: Value{
			AccessKeyID:     "AKID",
			SecretAccessKey: "SECRET",
			SessionToken:    "",
		},
	}

	assert.False(t, s.IsExpired(), "Expect static credentials to never expire")
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 0000000..1499b44
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,120 @@
+// Package stscreds are credential Providers to retrieve STS AWS credentials.
+//
+// STS provides multiple ways to retrieve credentials which can be used when making
+// future AWS service API operation calls.
+package stscreds
+
+import (
+ "fmt"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/service/sts"
+ "time"
+)
+
// AssumeRoler represents the minimal subset of the STS client API used by
// this provider. It exists so tests can substitute a stub STS client.
type AssumeRoler interface {
	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
}
+
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
// keeps track of their expiration time. This provider must be used explicitly,
// as it is not included in the credentials chain.
//
// Example how to configure a service to use this provider:
//
//     config := &aws.Config{
//         Credentials: stscreds.NewCredentials(nil, "arn-of-the-role-to-assume", 10*time.Second),
//     }
//     // Use config for creating your AWS service.
//
// Example how to obtain customised credentials:
//
//     provider := &stscreds.AssumeRoleProvider{
//         // Extend the duration to 1 hour.
//         Duration: time.Hour,
//         // Custom role name.
//         RoleSessionName: "custom-session-name",
//     }
//     creds := credentials.NewCredentials(provider)
//
type AssumeRoleProvider struct {
	// Embedded Expiry tracks when the retrieved credentials expire
	// (set via SetExpiration in Retrieve).
	credentials.Expiry

	// Custom STS client. If not set the default STS client will be used.
	Client AssumeRoler

	// Role to be assumed.
	RoleARN string

	// Session name, if you wish to reuse the credentials elsewhere.
	RoleSessionName string

	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
	Duration time.Duration

	// ExpiryWindow will allow the credentials to trigger refreshing prior to
	// the credentials actually expiring. This is beneficial so race conditions
	// with expiring credentials do not cause request to fail unexpectedly
	// due to ExpiredTokenException exceptions.
	//
	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
	// 10 seconds before the credentials are actually expired.
	//
	// If ExpiryWindow is 0 or less it will be ignored.
	ExpiryWindow time.Duration
}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// The sts and roleARN parameters are used for building the "AssumeRole" call.
+// Pass nil as sts to use the default client.
+//
+// Window is the expiry window that will be subtracted from the expiry returned
+// by the role credential request. This is done so that the credentials will
+// expire sooner than their actual lifespan.
+func NewCredentials(client AssumeRoler, roleARN string, window time.Duration) *credentials.Credentials {
+ return credentials.NewCredentials(&AssumeRoleProvider{
+ Client: client,
+ RoleARN: roleARN,
+ ExpiryWindow: window,
+ })
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+
+ // Apply defaults where parameters are not set.
+ if p.Client == nil {
+ p.Client = sts.New(nil)
+ }
+ if p.RoleSessionName == "" {
+ // Try to work out a role name that will hopefully end up unique.
+ p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+ }
+ if p.Duration == 0 {
+ // Expire as often as AWS permits.
+ p.Duration = 15 * time.Minute
+ }
+
+ roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{
+ DurationSeconds: aws.Long(int64(p.Duration / time.Second)),
+ RoleARN: aws.String(p.RoleARN),
+ RoleSessionName: aws.String(p.RoleSessionName),
+ })
+
+ if err != nil {
+ return credentials.Value{}, err
+ }
+
+ // We will proactively generate new credentials before they expire.
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: *roleOutput.Credentials.AccessKeyID,
+ SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+ SessionToken: *roleOutput.Credentials.SessionToken,
+ }, nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go
new file mode 100644
index 0000000..98b7690
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go
@@ -0,0 +1,58 @@
+package stscreds
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/sts"
+ "github.com/stretchr/testify/assert"
+ "testing"
+ "time"
+)
+
// stubSTS is a test double implementing AssumeRoler. It echoes the requested
// role ARN back as the access key ID and returns fixed secret/token values
// with a 60-minute expiry.
type stubSTS struct {
}

func (s *stubSTS) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
	expiry := time.Now().Add(60 * time.Minute)
	return &sts.AssumeRoleOutput{
		Credentials: &sts.Credentials{
			// Just reflect the role arn to the provider.
			AccessKeyID:     input.RoleARN,
			SecretAccessKey: aws.String("assumedSecretAccessKey"),
			SessionToken:    aws.String("assumedSessionToken"),
			Expiration:      &expiry,
		},
	}, nil
}
+
// TestAssumeRoleProvider verifies Retrieve maps the stub STS response into a
// credentials.Value (the stub reflects the role ARN as the access key ID).
func TestAssumeRoleProvider(t *testing.T) {
	stub := &stubSTS{}
	p := &AssumeRoleProvider{
		Client:  stub,
		RoleARN: "roleARN",
	}

	creds, err := p.Retrieve()
	assert.Nil(t, err, "Expect no error")

	assert.Equal(t, "roleARN", creds.AccessKeyID, "Expect access key ID to be reflected role ARN")
	assert.Equal(t, "assumedSecretAccessKey", creds.SecretAccessKey, "Expect secret access key to match")
	assert.Equal(t, "assumedSessionToken", creds.SessionToken, "Expect session token to match")
}
+
// BenchmarkAssumeRoleProvider measures parallel Retrieve calls against the
// stub STS client.
func BenchmarkAssumeRoleProvider(b *testing.B) {
	stub := &stubSTS{}
	p := &AssumeRoleProvider{
		Client:  stub,
		RoleARN: "roleARN",
	}

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err := p.Retrieve()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions.go
new file mode 100644
index 0000000..a2a88a9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions.go
@@ -0,0 +1,153 @@
+package aws
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var sleepDelay = func(delay time.Duration) {
+ time.Sleep(delay)
+}
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+ Len() int
+}
+
+// BuildContentLength builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine request body length and no "Content-Length" was specified it will panic.
+func BuildContentLength(r *Request) {
+ if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+ length, _ := strconv.ParseInt(slength, 10, 64)
+ r.HTTPRequest.ContentLength = length
+ return
+ }
+
+ var length int64
+ switch body := r.Body.(type) {
+ case nil:
+ length = 0
+ case lener:
+ length = int64(body.Len())
+ case io.Seeker:
+ r.bodyStart, _ = body.Seek(0, 1)
+ end, _ := body.Seek(0, 2)
+ body.Seek(r.bodyStart, 0) // make sure to seek back to original location
+ length = end - r.bodyStart
+ default:
+ panic("Cannot get length of body, must provide `ContentLength`")
+ }
+
+ r.HTTPRequest.ContentLength = length
+ r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+}
+
+// UserAgentHandler is a request handler for injecting User agent into requests.
+func UserAgentHandler(r *Request) {
+ r.HTTPRequest.Header.Set("User-Agent", SDKName+"/"+SDKVersion)
+}
+
+var reStatusCode = regexp.MustCompile(`^(\d+)`)
+
+// SendHandler is a request handler to send service request using HTTP client.
+func SendHandler(r *Request) {
+ var err error
+ r.HTTPResponse, err = r.Service.Config.HTTPClient.Do(r.HTTPRequest)
+ if err != nil {
+ // Capture the case where url.Error is returned for error processing
+ // response. e.g. 301 without location header comes back as string
+ // error and r.HTTPResponse is nil. Other url redirect errors will
+ // comeback in a similar method.
+ if e, ok := err.(*url.Error); ok {
+ if s := reStatusCode.FindStringSubmatch(e.Error()); s != nil {
+ code, _ := strconv.ParseInt(s[1], 10, 64)
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(code),
+ Status: http.StatusText(int(code)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ return
+ }
+ }
+ if r.HTTPRequest == nil {
+ // Add a dummy request response object to ensure the HTTPResponse
+ // value is consistent.
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(0),
+ Status: http.StatusText(int(0)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ }
+ // Catch all other request errors.
+ r.Error = awserr.New("RequestError", "send request failed", err)
+ r.Retryable.Set(true) // network errors are retryable
+ }
+}
+
+// ValidateResponseHandler is a request handler to validate service response.
+func ValidateResponseHandler(r *Request) {
+ if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+ // this may be replaced by an UnmarshalError handler
+ r.Error = awserr.New("UnknownError", "unknown error", nil)
+ }
+}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+func AfterRetryHandler(r *Request) {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if !r.Retryable.IsSet() {
+ r.Retryable.Set(r.Service.ShouldRetry(r))
+ }
+
+ if r.WillRetry() {
+ r.RetryDelay = r.Service.RetryRules(r)
+ sleepDelay(r.RetryDelay)
+
+ // when the expired token exception occurs the credentials
+ // need to be expired locally so that the next request to
+ // get credentials will trigger a credentials refresh.
+ if r.Error != nil {
+ if err, ok := r.Error.(awserr.Error); ok {
+ if isCodeExpiredCreds(err.Code()) {
+ r.Config.Credentials.Expire()
+ }
+ }
+ }
+
+ r.RetryCount++
+ r.Error = nil
+ }
+}
+
+var (
+ // ErrMissingRegion is an error that is returned if region configuration is
+ // not found.
+ ErrMissingRegion error = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+ // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+ // resolved for a service.
+ ErrMissingEndpoint error = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+func ValidateEndpointHandler(r *Request) {
+ if r.Service.SigningRegion == "" && r.Service.Config.Region == "" {
+ r.Error = ErrMissingRegion
+ } else if r.Service.Endpoint == "" {
+ r.Error = ErrMissingEndpoint
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions_test.go
new file mode 100644
index 0000000..c6a30c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions_test.go
@@ -0,0 +1,81 @@
+package aws
+
+import (
+ "net/http"
+ "os"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidateEndpointHandler(t *testing.T) {
+ os.Clearenv()
+ svc := NewService(&Config{Region: "us-west-2"})
+ svc.Handlers.Clear()
+ svc.Handlers.Validate.PushBack(ValidateEndpointHandler)
+
+ req := NewRequest(svc, &Operation{Name: "Operation"}, nil, nil)
+ err := req.Build()
+
+ assert.NoError(t, err)
+}
+
+func TestValidateEndpointHandlerErrorRegion(t *testing.T) {
+ os.Clearenv()
+ svc := NewService(nil)
+ svc.Handlers.Clear()
+ svc.Handlers.Validate.PushBack(ValidateEndpointHandler)
+
+ req := NewRequest(svc, &Operation{Name: "Operation"}, nil, nil)
+ err := req.Build()
+
+ assert.Error(t, err)
+ assert.Equal(t, ErrMissingRegion, err)
+}
+
+type mockCredsProvider struct {
+ expired bool
+ retreiveCalled bool
+}
+
+func (m *mockCredsProvider) Retrieve() (credentials.Value, error) {
+ m.retreiveCalled = true
+ return credentials.Value{}, nil
+}
+
+func (m *mockCredsProvider) IsExpired() bool {
+ return m.expired
+}
+
+func TestAfterRetryRefreshCreds(t *testing.T) {
+ os.Clearenv()
+ credProvider := &mockCredsProvider{}
+ svc := NewService(&Config{Credentials: credentials.NewCredentials(credProvider), MaxRetries: 1})
+
+ svc.Handlers.Clear()
+ svc.Handlers.ValidateResponse.PushBack(func(r *Request) {
+ r.Error = awserr.New("UnknownError", "", nil)
+ r.HTTPResponse = &http.Response{StatusCode: 400}
+ })
+ svc.Handlers.UnmarshalError.PushBack(func(r *Request) {
+ r.Error = awserr.New("ExpiredTokenException", "", nil)
+ })
+ svc.Handlers.AfterRetry.PushBack(func(r *Request) {
+ AfterRetryHandler(r)
+ })
+
+ assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired")
+ assert.False(t, credProvider.retreiveCalled)
+
+ req := NewRequest(svc, &Operation{Name: "Operation"}, nil, nil)
+ req.Send()
+
+ assert.True(t, svc.Config.Credentials.IsExpired())
+ assert.False(t, credProvider.retreiveCalled)
+
+ _, err := svc.Config.Credentials.Get()
+ assert.NoError(t, err)
+ assert.True(t, credProvider.retreiveCalled)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers.go
new file mode 100644
index 0000000..1968cb9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers.go
@@ -0,0 +1,85 @@
+package aws
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+ Validate HandlerList
+ Build HandlerList
+ Sign HandlerList
+ Send HandlerList
+ ValidateResponse HandlerList
+ Unmarshal HandlerList
+ UnmarshalMeta HandlerList
+ UnmarshalError HandlerList
+ Retry HandlerList
+ AfterRetry HandlerList
+}
+
+// copy returns of this handler's lists.
+func (h *Handlers) copy() Handlers {
+ return Handlers{
+ Validate: h.Validate.copy(),
+ Build: h.Build.copy(),
+ Sign: h.Sign.copy(),
+ Send: h.Send.copy(),
+ ValidateResponse: h.ValidateResponse.copy(),
+ Unmarshal: h.Unmarshal.copy(),
+ UnmarshalError: h.UnmarshalError.copy(),
+ UnmarshalMeta: h.UnmarshalMeta.copy(),
+ Retry: h.Retry.copy(),
+ AfterRetry: h.AfterRetry.copy(),
+ }
+}
+
+// Clear removes callback functions for all handlers
+func (h *Handlers) Clear() {
+ h.Validate.Clear()
+ h.Build.Clear()
+ h.Send.Clear()
+ h.Sign.Clear()
+ h.Unmarshal.Clear()
+ h.UnmarshalMeta.Clear()
+ h.UnmarshalError.Clear()
+ h.ValidateResponse.Clear()
+ h.Retry.Clear()
+ h.AfterRetry.Clear()
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+ list []func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+ var n HandlerList
+ n.list = append([]func(*Request){}, l.list...)
+ return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+ l.list = []func(*Request){}
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+ return len(l.list)
+}
+
+// PushBack pushes handlers f to the back of the handler list.
+func (l *HandlerList) PushBack(f ...func(*Request)) {
+ l.list = append(l.list, f...)
+}
+
+// PushFront pushes handlers f to the front of the handler list.
+func (l *HandlerList) PushFront(f ...func(*Request)) {
+ l.list = append(f, l.list...)
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+ for _, f := range l.list {
+ f(r)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers_test.go
new file mode 100644
index 0000000..26776f6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers_test.go
@@ -0,0 +1,31 @@
+package aws
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestHandlerList(t *testing.T) {
+ s := ""
+ r := &Request{}
+ l := HandlerList{}
+ l.PushBack(func(r *Request) {
+ s += "a"
+ r.Data = s
+ })
+ l.Run(r)
+ assert.Equal(t, "a", s)
+ assert.Equal(t, "a", r.Data)
+}
+
+func TestMultipleHandlers(t *testing.T) {
+ r := &Request{}
+ l := HandlerList{}
+ l.PushBack(func(r *Request) { r.Data = nil })
+ l.PushFront(func(r *Request) { r.Data = Boolean(true) })
+ l.Run(r)
+ if r.Data != nil {
+ t.Error("Expected handler to execute")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator.go
new file mode 100644
index 0000000..b4e95ce
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator.go
@@ -0,0 +1,89 @@
+package aws
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// ValidateParameters is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+func ValidateParameters(r *Request) {
+ if r.ParamsFilled() {
+ v := validator{errors: []string{}}
+ v.validateAny(reflect.ValueOf(r.Params), "")
+
+ if count := len(v.errors); count > 0 {
+ format := "%d validation errors:\n- %s"
+ msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- "))
+ r.Error = awserr.New("InvalidParameter", msg, nil)
+ }
+ }
+}
+
+// A validator validates values. Collects validations errors which occurs.
+type validator struct {
+ errors []string
+}
+
+// validateAny will validate any struct, slice or map type. All validations
+// are also performed recursively for nested types.
+func (v *validator) validateAny(value reflect.Value, path string) {
+ value = reflect.Indirect(value)
+ if !value.IsValid() {
+ return
+ }
+
+ switch value.Kind() {
+ case reflect.Struct:
+ v.validateStruct(value, path)
+ case reflect.Slice:
+ for i := 0; i < value.Len(); i++ {
+ v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i))
+ }
+ case reflect.Map:
+ for _, n := range value.MapKeys() {
+ v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String()))
+ }
+ }
+}
+
+// validateStruct will validate the struct value's fields. If the structure has
+// nested types those types will be validated also.
+func (v *validator) validateStruct(value reflect.Value, path string) {
+ prefix := "."
+ if path == "" {
+ prefix = ""
+ }
+
+ for i := 0; i < value.Type().NumField(); i++ {
+ f := value.Type().Field(i)
+ if strings.ToLower(f.Name[0:1]) == f.Name[0:1] {
+ continue
+ }
+ fvalue := value.FieldByName(f.Name)
+
+ notset := false
+ if f.Tag.Get("required") != "" {
+ switch fvalue.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.Map:
+ if fvalue.IsNil() {
+ notset = true
+ }
+ default:
+ if !fvalue.IsValid() {
+ notset = true
+ }
+ }
+ }
+
+ if notset {
+ msg := "missing required parameter: " + path + prefix + f.Name
+ v.errors = append(v.errors, msg)
+ } else {
+ v.validateAny(fvalue, path+prefix+f.Name)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator_test.go
new file mode 100644
index 0000000..b8239f4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator_test.go
@@ -0,0 +1,84 @@
+package aws_test
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/stretchr/testify/assert"
+)
+
+var service = func() *aws.Service {
+ s := &aws.Service{
+ Config: &aws.Config{},
+ ServiceName: "mock-service",
+ APIVersion: "2015-01-01",
+ }
+ return s
+}()
+
+type StructShape struct {
+ RequiredList []*ConditionalStructShape `required:"true"`
+ RequiredMap map[string]*ConditionalStructShape `required:"true"`
+ RequiredBool *bool `required:"true"`
+ OptionalStruct *ConditionalStructShape
+
+ hiddenParameter *string
+
+ metadataStructureShape
+}
+
+type metadataStructureShape struct {
+ SDKShapeTraits bool
+}
+
+type ConditionalStructShape struct {
+ Name *string `required:"true"`
+ SDKShapeTraits bool
+}
+
+func TestNoErrors(t *testing.T) {
+ input := &StructShape{
+ RequiredList: []*ConditionalStructShape{},
+ RequiredMap: map[string]*ConditionalStructShape{
+ "key1": {Name: aws.String("Name")},
+ "key2": {Name: aws.String("Name")},
+ },
+ RequiredBool: aws.Boolean(true),
+ OptionalStruct: &ConditionalStructShape{Name: aws.String("Name")},
+ }
+
+ req := aws.NewRequest(service, &aws.Operation{}, input, nil)
+ aws.ValidateParameters(req)
+ assert.NoError(t, req.Error)
+}
+
+func TestMissingRequiredParameters(t *testing.T) {
+ input := &StructShape{}
+ req := aws.NewRequest(service, &aws.Operation{}, input, nil)
+ aws.ValidateParameters(req)
+
+ assert.Error(t, req.Error)
+ assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
+ assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList\n- missing required parameter: RequiredMap\n- missing required parameter: RequiredBool", req.Error.(awserr.Error).Message())
+}
+
+func TestNestedMissingRequiredParameters(t *testing.T) {
+ input := &StructShape{
+ RequiredList: []*ConditionalStructShape{{}},
+ RequiredMap: map[string]*ConditionalStructShape{
+ "key1": {Name: aws.String("Name")},
+ "key2": {},
+ },
+ RequiredBool: aws.Boolean(true),
+ OptionalStruct: &ConditionalStructShape{},
+ }
+
+ req := aws.NewRequest(service, &aws.Operation{}, input, nil)
+ aws.ValidateParameters(req)
+
+ assert.Error(t, req.Error)
+ assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
+ assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList[0].Name\n- missing required parameter: RequiredMap[\"key2\"].Name\n- missing required parameter: OptionalStruct.Name", req.Error.(awserr.Error).Message())
+
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request.go
new file mode 100644
index 0000000..68d1a4d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request.go
@@ -0,0 +1,312 @@
+package aws
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+ *Service
+ Handlers Handlers
+ Time time.Time
+ ExpireTime time.Duration
+ Operation *Operation
+ HTTPRequest *http.Request
+ HTTPResponse *http.Response
+ Body io.ReadSeeker
+ bodyStart int64 // offset from beginning of Body that the request body starts
+ Params interface{}
+ Error error
+ Data interface{}
+ RequestID string
+ RetryCount uint
+ Retryable SettableBool
+ RetryDelay time.Duration
+
+ built bool
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+ Name string
+ HTTPMethod string
+ HTTPPath string
+ *Paginator
+}
+
+// Paginator keeps track of pagination configuration for an API operation.
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is pointer value to an object which the request's response
+// payload will be deserialized to.
+func NewRequest(service *Service, operation *Operation, params interface{}, data interface{}) *Request {
+ method := operation.HTTPMethod
+ if method == "" {
+ method = "POST"
+ }
+ p := operation.HTTPPath
+ if p == "" {
+ p = "/"
+ }
+
+ httpReq, _ := http.NewRequest(method, "", nil)
+ httpReq.URL, _ = url.Parse(service.Endpoint + p)
+
+ r := &Request{
+ Service: service,
+ Handlers: service.Handlers.copy(),
+ Time: time.Now(),
+ ExpireTime: 0,
+ Operation: operation,
+ HTTPRequest: httpReq,
+ Body: nil,
+ Params: params,
+ Error: nil,
+ Data: data,
+ }
+ r.SetBufferBody([]byte{})
+
+ return r
+}
+
+// WillRetry returns if the request's can be retried.
+func (r *Request) WillRetry() bool {
+ return r.Error != nil && r.Retryable.Get() && r.RetryCount < r.Service.MaxRetries()
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is a valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.HTTPRequest.Body = ioutil.NopCloser(reader)
+ r.Body = reader
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+ r.ExpireTime = expireTime
+ r.Sign()
+ if r.Error != nil {
+ return "", r.Error
+ }
+ return r.HTTPRequest.URL.String(), nil
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Anny additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Error = nil
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request retuning error if errors are encountered.
+//
+// Send will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ return r.Error
+ }
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
+
+// Send will send the request returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+func (r *Request) Send() error {
+ for {
+ r.Sign()
+ if r.Error != nil {
+ return r.Error
+ }
+
+ if r.Retryable.Get() {
+ // Re-seek the body back to the original point in for a retry so that
+ // send will send the body's contents again in the upcoming request.
+ r.Body.Seek(r.bodyStart, 0)
+ }
+ r.Retryable.Reset()
+
+ r.Handlers.Send.Run(r)
+ if r.Error != nil {
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ return r.Error
+ }
+ continue
+ }
+
+ r.Handlers.UnmarshalMeta.Run(r)
+ r.Handlers.ValidateResponse.Run(r)
+ if r.Error != nil {
+ r.Handlers.UnmarshalError.Run(r)
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ return r.Error
+ }
+ continue
+ }
+
+ r.Handlers.Unmarshal.Run(r)
+ if r.Error != nil {
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ return r.Error
+ }
+ continue
+ }
+
+ break
+ }
+
+ return nil
+}
+
+// HasNextPage returns true if this request has more pages of data available.
+func (r *Request) HasNextPage() bool {
+ return r.nextPageTokens() != nil
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of
+// data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+
+ if r.Operation.TruncationToken != "" {
+ tr := awsutil.ValuesAtAnyPath(r.Data, r.Operation.TruncationToken)
+ if tr == nil || len(tr) == 0 {
+ return nil
+ }
+ switch v := tr[0].(type) {
+ case bool:
+ if v == false {
+ return nil
+ }
+ }
+ }
+
+ found := false
+ tokens := make([]interface{}, len(r.Operation.OutputTokens))
+
+ for i, outtok := range r.Operation.OutputTokens {
+ v := awsutil.ValuesAtAnyPath(r.Data, outtok)
+ if v != nil && len(v) > 0 {
+ found = true
+ tokens[i] = v[0]
+ }
+ }
+
+ if found {
+ return tokens
+ }
+ return nil
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+func (r *Request) NextPage() *Request {
+ tokens := r.nextPageTokens()
+ if tokens == nil {
+ return nil
+ }
+
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+ nr := NewRequest(r.Service, r.Operation, awsutil.CopyOf(r.Params), data)
+ for i, intok := range nr.Operation.InputTokens {
+ awsutil.SetValueAtAnyPath(nr.Params, intok, tokens[i])
+ }
+ return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+// func(page *T, lastPage bool) bool {
+// return true // return false to stop iterating
+// }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+ for page := r; page != nil; page = page.NextPage() {
+ page.Send()
+ shouldContinue := fn(page.Data, !page.HasNextPage())
+ if page.Error != nil || !shouldContinue {
+ return page.Error
+ }
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_pagination_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_pagination_test.go
new file mode 100644
index 0000000..35b7ee8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_pagination_test.go
@@ -0,0 +1,305 @@
+package aws_test
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/internal/test/unit"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ = unit.Imported
+
+// Use DynamoDB methods for simplicity
+func TestPagination(t *testing.T) {
+ db := dynamodb.New(nil)
+ tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false
+
+ reqNum := 0
+ resps := []*dynamodb.ListTablesOutput{
+ {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
+ {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
+ {TableNames: []*string{aws.String("Table5")}},
+ }
+
+ db.Handlers.Send.Clear() // mock sending
+ db.Handlers.Unmarshal.Clear()
+ db.Handlers.UnmarshalMeta.Clear()
+ db.Handlers.ValidateResponse.Clear()
+ db.Handlers.Build.PushBack(func(r *aws.Request) {
+ in := r.Params.(*dynamodb.ListTablesInput)
+ if in == nil {
+ tokens = append(tokens, "")
+ } else if in.ExclusiveStartTableName != nil {
+ tokens = append(tokens, *in.ExclusiveStartTableName)
+ }
+ })
+ db.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
+ r.Data = resps[reqNum]
+ reqNum++
+ })
+
+ params := &dynamodb.ListTablesInput{Limit: aws.Long(2)}
+ err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool {
+ numPages++
+ for _, t := range p.TableNames {
+ pages = append(pages, *t)
+ }
+ if last {
+ if gotToEnd {
+ assert.Fail(t, "last=true happened twice")
+ }
+ gotToEnd = true
+ }
+ return true
+ })
+
+ assert.Equal(t, []string{"Table2", "Table4"}, tokens)
+ assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages)
+ assert.Equal(t, 3, numPages)
+ assert.True(t, gotToEnd)
+ assert.Nil(t, err)
+ assert.Nil(t, params.ExclusiveStartTableName)
+}
+
+// Use DynamoDB methods for simplicity
+func TestPaginationEachPage(t *testing.T) {
+ db := dynamodb.New(nil)
+ tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false
+
+ reqNum := 0
+ resps := []*dynamodb.ListTablesOutput{
+ {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
+ {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
+ {TableNames: []*string{aws.String("Table5")}},
+ }
+
+ db.Handlers.Send.Clear() // mock sending
+ db.Handlers.Unmarshal.Clear()
+ db.Handlers.UnmarshalMeta.Clear()
+ db.Handlers.ValidateResponse.Clear()
+ db.Handlers.Build.PushBack(func(r *aws.Request) {
+ in := r.Params.(*dynamodb.ListTablesInput)
+ if in == nil {
+ tokens = append(tokens, "")
+ } else if in.ExclusiveStartTableName != nil {
+ tokens = append(tokens, *in.ExclusiveStartTableName)
+ }
+ })
+ db.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
+ r.Data = resps[reqNum]
+ reqNum++
+ })
+
+ params := &dynamodb.ListTablesInput{Limit: aws.Long(2)}
+ req, _ := db.ListTablesRequest(params)
+ err := req.EachPage(func(p interface{}, last bool) bool {
+ numPages++
+ for _, t := range p.(*dynamodb.ListTablesOutput).TableNames {
+ pages = append(pages, *t)
+ }
+ if last {
+ if gotToEnd {
+ assert.Fail(t, "last=true happened twice")
+ }
+ gotToEnd = true
+ }
+
+ return true
+ })
+
+ assert.Equal(t, []string{"Table2", "Table4"}, tokens)
+ assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages)
+ assert.Equal(t, 3, numPages)
+ assert.True(t, gotToEnd)
+ assert.Nil(t, err)
+}
+
+// Use DynamoDB methods for simplicity
+func TestPaginationEarlyExit(t *testing.T) {
+ db := dynamodb.New(nil)
+ numPages, gotToEnd := 0, false
+
+ reqNum := 0
+ resps := []*dynamodb.ListTablesOutput{
+ {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
+ {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
+ {TableNames: []*string{aws.String("Table5")}},
+ }
+
+ db.Handlers.Send.Clear() // mock sending
+ db.Handlers.Unmarshal.Clear()
+ db.Handlers.UnmarshalMeta.Clear()
+ db.Handlers.ValidateResponse.Clear()
+ db.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
+ r.Data = resps[reqNum]
+ reqNum++
+ })
+
+ params := &dynamodb.ListTablesInput{Limit: aws.Long(2)}
+ err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool {
+ numPages++
+ if numPages == 2 {
+ return false
+ }
+ if last {
+ if gotToEnd {
+ assert.Fail(t, "last=true happened twice")
+ }
+ gotToEnd = true
+ }
+ return true
+ })
+
+ assert.Equal(t, 2, numPages)
+ assert.False(t, gotToEnd)
+ assert.Nil(t, err)
+}
+
+func TestSkipPagination(t *testing.T) {
+ client := s3.New(nil)
+ client.Handlers.Send.Clear() // mock sending
+ client.Handlers.Unmarshal.Clear()
+ client.Handlers.UnmarshalMeta.Clear()
+ client.Handlers.ValidateResponse.Clear()
+ client.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
+ r.Data = &s3.HeadBucketOutput{}
+ })
+
+ req, _ := client.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("bucket")})
+
+ numPages, gotToEnd := 0, false
+ req.EachPage(func(p interface{}, last bool) bool {
+ numPages++
+ if last {
+ gotToEnd = true
+ }
+ return true
+ })
+ assert.Equal(t, 1, numPages)
+ assert.True(t, gotToEnd)
+}
+
+// Use S3 for simplicity
+func TestPaginationTruncation(t *testing.T) {
+ count := 0
+ client := s3.New(nil)
+
+ reqNum := &count
+ resps := []*s3.ListObjectsOutput{
+ {IsTruncated: aws.Boolean(true), Contents: []*s3.Object{{Key: aws.String("Key1")}}},
+ {IsTruncated: aws.Boolean(true), Contents: []*s3.Object{{Key: aws.String("Key2")}}},
+ {IsTruncated: aws.Boolean(false), Contents: []*s3.Object{{Key: aws.String("Key3")}}},
+ {IsTruncated: aws.Boolean(true), Contents: []*s3.Object{{Key: aws.String("Key4")}}},
+ }
+
+ client.Handlers.Send.Clear() // mock sending
+ client.Handlers.Unmarshal.Clear()
+ client.Handlers.UnmarshalMeta.Clear()
+ client.Handlers.ValidateResponse.Clear()
+ client.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
+ r.Data = resps[*reqNum]
+ *reqNum++
+ })
+
+ params := &s3.ListObjectsInput{Bucket: aws.String("bucket")}
+
+ results := []string{}
+ err := client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool {
+ results = append(results, *p.Contents[0].Key)
+ return true
+ })
+
+ assert.Equal(t, []string{"Key1", "Key2", "Key3"}, results)
+ assert.Nil(t, err)
+
+ // Try again without truncation token at all
+ count = 0
+ resps[1].IsTruncated = nil
+ resps[2].IsTruncated = aws.Boolean(true)
+ results = []string{}
+ err = client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool {
+ results = append(results, *p.Contents[0].Key)
+ return true
+ })
+
+ assert.Equal(t, []string{"Key1", "Key2"}, results)
+ assert.Nil(t, err)
+
+}
+
+// Benchmarks
+var benchResps = []*dynamodb.ListTablesOutput{
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE")}},
+}
+
+var benchDb = func() *dynamodb.DynamoDB {
+ db := dynamodb.New(nil)
+ db.Handlers.Send.Clear() // mock sending
+ db.Handlers.Unmarshal.Clear()
+ db.Handlers.UnmarshalMeta.Clear()
+ db.Handlers.ValidateResponse.Clear()
+ return db
+}
+
+func BenchmarkCodegenIterator(b *testing.B) {
+ reqNum := 0
+ db := benchDb()
+ db.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
+ r.Data = benchResps[reqNum]
+ reqNum++
+ })
+
+ input := &dynamodb.ListTablesInput{Limit: aws.Long(2)}
+ iter := func(fn func(*dynamodb.ListTablesOutput, bool) bool) error {
+ page, _ := db.ListTablesRequest(input)
+ for ; page != nil; page = page.NextPage() {
+ page.Send()
+ out := page.Data.(*dynamodb.ListTablesOutput)
+ if result := fn(out, !page.HasNextPage()); page.Error != nil || !result {
+ return page.Error
+ }
+ }
+ return nil
+ }
+
+ for i := 0; i < b.N; i++ {
+ reqNum = 0
+ iter(func(p *dynamodb.ListTablesOutput, last bool) bool {
+ return true
+ })
+ }
+}
+
+func BenchmarkEachPageIterator(b *testing.B) {
+ reqNum := 0
+ db := benchDb()
+ db.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
+ r.Data = benchResps[reqNum]
+ reqNum++
+ })
+
+ input := &dynamodb.ListTablesInput{Limit: aws.Long(2)}
+ for i := 0; i < b.N; i++ {
+ reqNum = 0
+ req, _ := db.ListTablesRequest(input)
+ req.EachPage(func(p interface{}, last bool) bool {
+ return true
+ })
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_test.go
new file mode 100644
index 0000000..fcb4718
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_test.go
@@ -0,0 +1,219 @@
+package aws
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/stretchr/testify/assert"
+)
+
+// testData is the minimal JSON-decodable payload the mock unmarshal handler
+// decodes a successful response body into.
+type testData struct {
+ Data string
+}
+
+// body wraps a string as an io.ReadCloser, for use as a mock HTTP response body.
+func body(str string) io.ReadCloser {
+ return ioutil.NopCloser(bytes.NewReader([]byte(str)))
+}
+
+// unmarshal is a mock Unmarshal handler: it JSON-decodes the response body
+// into req.Data (when set) and always closes the body.
+func unmarshal(req *Request) {
+ defer req.HTTPResponse.Body.Close()
+ if req.Data != nil {
+ json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data)
+ }
+ return
+}
+
+// unmarshalError is a mock UnmarshalError handler: it decodes a JSON error
+// body ({"__type": ..., "message": ...}) into an awserr.RequestFailure that
+// carries the HTTP status code.
+// NOTE(review): "UnmarshaleError" below looks like a typo of "UnmarshalError";
+// left unchanged since it is a runtime error code the tests could match on.
+func unmarshalError(req *Request) {
+ bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body)
+ if err != nil {
+ req.Error = awserr.New("UnmarshaleError", req.HTTPResponse.Status, err)
+ return
+ }
+ if len(bodyBytes) == 0 {
+ // An empty body carries no service error code; report the HTTP status only.
+ req.Error = awserr.NewRequestFailure(
+ awserr.New("UnmarshaleError", req.HTTPResponse.Status, fmt.Errorf("empty body")),
+ req.HTTPResponse.StatusCode,
+ "",
+ )
+ return
+ }
+ var jsonErr jsonErrorResponse
+ if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
+ req.Error = awserr.New("UnmarshaleError", "JSON unmarshal", err)
+ return
+ }
+ req.Error = awserr.NewRequestFailure(
+ awserr.New(jsonErr.Code, jsonErr.Message, nil),
+ req.HTTPResponse.StatusCode,
+ "",
+ )
+}
+
+// jsonErrorResponse mirrors the wire shape of a JSON service error body.
+type jsonErrorResponse struct {
+ Code string `json:"__type"`
+ Message string `json:"message"`
+}
+
+// test that retries occur for 5xx status codes
+func TestRequestRecoverRetry5xx(t *testing.T) {
+ reqNum := 0
+ reqs := []http.Response{
+ {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
+ {StatusCode: 501, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
+ {StatusCode: 200, Body: body(`{"data":"valid"}`)},
+ }
+
+ s := NewService(&Config{MaxRetries: 10})
+ s.Handlers.Validate.Clear()
+ s.Handlers.Unmarshal.PushBack(unmarshal)
+ s.Handlers.UnmarshalError.PushBack(unmarshalError)
+ s.Handlers.Send.Clear() // mock sending
+ s.Handlers.Send.PushBack(func(r *Request) {
+ r.HTTPResponse = &reqs[reqNum]
+ reqNum++
+ })
+ out := &testData{}
+ r := NewRequest(s, &Operation{Name: "Operation"}, nil, out)
+ err := r.Send()
+ assert.Nil(t, err)
+ assert.Equal(t, 2, int(r.RetryCount))
+ assert.Equal(t, "valid", out.Data)
+}
+
+// test that retries occur for 4xx status codes with a response type that can be retried - see `shouldRetry`
+func TestRequestRecoverRetry4xxRetryable(t *testing.T) {
+ reqNum := 0
+ reqs := []http.Response{
+ {StatusCode: 400, Body: body(`{"__type":"Throttling","message":"Rate exceeded."}`)},
+ {StatusCode: 429, Body: body(`{"__type":"ProvisionedThroughputExceededException","message":"Rate exceeded."}`)},
+ {StatusCode: 200, Body: body(`{"data":"valid"}`)},
+ }
+
+ s := NewService(&Config{MaxRetries: 10})
+ s.Handlers.Validate.Clear()
+ s.Handlers.Unmarshal.PushBack(unmarshal)
+ s.Handlers.UnmarshalError.PushBack(unmarshalError)
+ s.Handlers.Send.Clear() // mock sending
+ s.Handlers.Send.PushBack(func(r *Request) {
+ r.HTTPResponse = &reqs[reqNum]
+ reqNum++
+ })
+ out := &testData{}
+ r := NewRequest(s, &Operation{Name: "Operation"}, nil, out)
+ err := r.Send()
+ assert.Nil(t, err)
+ assert.Equal(t, 2, int(r.RetryCount))
+ assert.Equal(t, "valid", out.Data)
+}
+
+// test that retries don't occur for 4xx status codes with a response type that can't be retried
+func TestRequest4xxUnretryable(t *testing.T) {
+ s := NewService(&Config{MaxRetries: 10})
+ s.Handlers.Validate.Clear()
+ s.Handlers.Unmarshal.PushBack(unmarshal)
+ s.Handlers.UnmarshalError.PushBack(unmarshalError)
+ s.Handlers.Send.Clear() // mock sending
+ s.Handlers.Send.PushBack(func(r *Request) {
+ r.HTTPResponse = &http.Response{StatusCode: 401, Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`)}
+ })
+ out := &testData{}
+ r := NewRequest(s, &Operation{Name: "Operation"}, nil, out)
+ err := r.Send()
+ assert.NotNil(t, err)
+ if e, ok := err.(awserr.RequestFailure); ok {
+ assert.Equal(t, 401, e.StatusCode())
+ } else {
+ assert.Fail(t, "Expected error to be a service failure")
+ }
+ assert.Equal(t, "SignatureDoesNotMatch", err.(awserr.Error).Code())
+ assert.Equal(t, "Signature does not match.", err.(awserr.Error).Message())
+ assert.Equal(t, 0, int(r.RetryCount))
+}
+
+// TestRequestExhaustRetries verifies that a request gives up after the
+// default maximum of 3 retries (Config.MaxRetries: -1 selects the service
+// default) and that the exponential backoff delays are 30/60/120ms.
+func TestRequestExhaustRetries(t *testing.T) {
+ delays := []time.Duration{}
+ // Capture backoff delays instead of sleeping.
+ // NOTE(review): the package-level sleepDelay is replaced and never
+ // restored, so later tests in this package also see the stub.
+ sleepDelay = func(delay time.Duration) {
+ delays = append(delays, delay)
+ }
+
+ reqNum := 0
+ reqs := []http.Response{
+ {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
+ {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
+ {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
+ {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
+ }
+
+ s := NewService(&Config{MaxRetries: -1})
+ s.Handlers.Validate.Clear()
+ s.Handlers.Unmarshal.PushBack(unmarshal)
+ s.Handlers.UnmarshalError.PushBack(unmarshalError)
+ s.Handlers.Send.Clear() // mock sending
+ s.Handlers.Send.PushBack(func(r *Request) {
+ r.HTTPResponse = &reqs[reqNum]
+ reqNum++
+ })
+ r := NewRequest(s, &Operation{Name: "Operation"}, nil, nil)
+ err := r.Send()
+ assert.NotNil(t, err)
+ if e, ok := err.(awserr.RequestFailure); ok {
+ assert.Equal(t, 500, e.StatusCode())
+ } else {
+ assert.Fail(t, "Expected error to be a service failure")
+ }
+ assert.Equal(t, "UnknownError", err.(awserr.Error).Code())
+ assert.Equal(t, "An error occurred.", err.(awserr.Error).Message())
+ assert.Equal(t, 3, int(r.RetryCount))
+ assert.True(t, reflect.DeepEqual([]time.Duration{30 * time.Millisecond, 60 * time.Millisecond, 120 * time.Millisecond}, delays))
+}
+
+// test that the request is retried after the credentials are expired.
+func TestRequestRecoverExpiredCreds(t *testing.T) {
+ reqNum := 0
+ reqs := []http.Response{
+ {StatusCode: 400, Body: body(`{"__type":"ExpiredTokenException","message":"expired token"}`)},
+ {StatusCode: 200, Body: body(`{"data":"valid"}`)},
+ }
+
+ s := NewService(&Config{MaxRetries: 10, Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "")})
+ s.Handlers.Validate.Clear()
+ s.Handlers.Unmarshal.PushBack(unmarshal)
+ s.Handlers.UnmarshalError.PushBack(unmarshalError)
+
+ credExpiredBeforeRetry := false
+ credExpiredAfterRetry := false
+
+ s.Handlers.AfterRetry.PushBack(func(r *Request) {
+ credExpiredAfterRetry = r.Config.Credentials.IsExpired()
+ })
+
+ s.Handlers.Sign.Clear()
+ s.Handlers.Sign.PushBack(func(r *Request) {
+ r.Config.Credentials.Get()
+ })
+ s.Handlers.Send.Clear() // mock sending
+ s.Handlers.Send.PushBack(func(r *Request) {
+ r.HTTPResponse = &reqs[reqNum]
+ reqNum++
+ })
+ out := &testData{}
+ r := NewRequest(s, &Operation{Name: "Operation"}, nil, out)
+ err := r.Send()
+ assert.Nil(t, err)
+
+ assert.False(t, credExpiredBeforeRetry, "Expect valid creds before retry check")
+ assert.True(t, credExpiredAfterRetry, "Expect expired creds after retry check")
+ assert.False(t, s.Config.Credentials.IsExpired(), "Expect valid creds after cred expired recovery")
+
+ assert.Equal(t, 1, int(r.RetryCount))
+ assert.Equal(t, "valid", out.Data)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service.go
new file mode 100644
index 0000000..42d1be4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service.go
@@ -0,0 +1,177 @@
+package aws
+
+import (
+ "fmt"
+ "math"
+ "net/http"
+ "net/http/httputil"
+ "regexp"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/internal/endpoints"
+)
+
+// A Service implements the base service request and response handling
+// used by all services.
+type Service struct {
+ Config *Config
+ Handlers Handlers
+ ManualSend bool
+ ServiceName string
+ APIVersion string
+ Endpoint string
+ SigningName string
+ SigningRegion string
+ JSONVersion string
+ TargetPrefix string
+ // RetryRules computes the backoff delay before the next retry attempt.
+ RetryRules func(*Request) time.Duration
+ // ShouldRetry reports whether a failed request may be retried.
+ ShouldRetry func(*Request) bool
+ // DefaultMaxRetries is used when Config.MaxRetries is negative (see MaxRetries).
+ DefaultMaxRetries uint
+}
+
+// schemeRE matches a leading URL scheme (e.g. "https://") on an endpoint.
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// NewService returns a pointer to a new Service initialized from config.
+func NewService(config *Config) *Service {
+ svc := &Service{Config: config}
+ svc.Initialize()
+ return svc
+}
+
+// Initialize initializes the service: it fills in defaults for a nil
+// Config/HTTP client and the retry policy, installs the default handler
+// chain (validation, user agent, content length, send, retry, response
+// validation), wires up debug logging, and resolves the endpoint.
+func (s *Service) Initialize() {
+ if s.Config == nil {
+ s.Config = &Config{}
+ }
+ if s.Config.HTTPClient == nil {
+ s.Config.HTTPClient = http.DefaultClient
+ }
+
+ if s.RetryRules == nil {
+ s.RetryRules = retryRules
+ }
+
+ if s.ShouldRetry == nil {
+ s.ShouldRetry = shouldRetry
+ }
+
+ // Default handler chain shared by all services.
+ s.DefaultMaxRetries = 3
+ s.Handlers.Validate.PushBack(ValidateEndpointHandler)
+ s.Handlers.Build.PushBack(UserAgentHandler)
+ s.Handlers.Sign.PushBack(BuildContentLength)
+ s.Handlers.Send.PushBack(SendHandler)
+ s.Handlers.AfterRetry.PushBack(AfterRetryHandler)
+ s.Handlers.ValidateResponse.PushBack(ValidateResponseHandler)
+ s.AddDebugHandlers()
+ s.buildEndpoint()
+
+ if !s.Config.DisableParamValidation {
+ s.Handlers.Validate.PushBack(ValidateParameters)
+ }
+}
+
+// buildEndpoint builds the endpoint values the service will use to make
+// requests with. An explicit Config.Endpoint wins; otherwise the endpoint
+// and signing region are looked up by service name and region. A scheme
+// (https, or http when SSL is disabled) is prepended if missing.
+func (s *Service) buildEndpoint() {
+ if s.Config.Endpoint != "" {
+ s.Endpoint = s.Config.Endpoint
+ } else {
+ s.Endpoint, s.SigningRegion =
+ endpoints.EndpointForRegion(s.ServiceName, s.Config.Region)
+ }
+
+ if s.Endpoint != "" && !schemeRE.MatchString(s.Endpoint) {
+ scheme := "https"
+ if s.Config.DisableSSL {
+ scheme = "http"
+ }
+ s.Endpoint = scheme + "://" + s.Endpoint
+ }
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information. The dump of the signed request is written before sending,
+// and the response (or the send error) after. No-op when LogLevel is 0.
+// NOTE(review): assumes Config.Logger is non-nil whenever LogLevel != 0 —
+// confirm, as Fprintf to a nil writer would panic.
+func (s *Service) AddDebugHandlers() {
+ out := s.Config.Logger
+ if s.Config.LogLevel == 0 {
+ return
+ }
+
+ s.Handlers.Send.PushFront(func(r *Request) {
+ logBody := r.Config.LogHTTPBody
+ dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+
+ fmt.Fprintf(out, "---[ REQUEST POST-SIGN ]-----------------------------\n")
+ fmt.Fprintf(out, "%s\n", string(dumpedBody))
+ fmt.Fprintf(out, "-----------------------------------------------------\n")
+ })
+ s.Handlers.Send.PushBack(func(r *Request) {
+ fmt.Fprintf(out, "---[ RESPONSE ]--------------------------------------\n")
+ if r.HTTPResponse != nil {
+ logBody := r.Config.LogHTTPBody
+ dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
+ fmt.Fprintf(out, "%s\n", string(dumpedBody))
+ } else if r.Error != nil {
+ fmt.Fprintf(out, "%s\n", r.Error)
+ }
+ fmt.Fprintf(out, "-----------------------------------------------------\n")
+ })
+}
+
+// MaxRetries returns the maximum number of retries the service will make
+// for an individual API request. A negative Config.MaxRetries selects the
+// service's DefaultMaxRetries.
+func (s *Service) MaxRetries() uint {
+ if s.Config.MaxRetries < 0 {
+ return s.DefaultMaxRetries
+ }
+ return uint(s.Config.MaxRetries)
+}
+
+// retryRules returns the delay duration before retrying this request again:
+// exponential backoff of 30ms * 2^RetryCount (30, 60, 120, ... ms).
+func retryRules(r *Request) time.Duration {
+ delay := time.Duration(math.Pow(2, float64(r.RetryCount))) * 30
+ return delay * time.Millisecond
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+ "RequestError": {},
+ "ProvisionedThroughputExceededException": {},
+ "Throttling": {},
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "RequestExpired": {}, // EC2 Only
+}
+
+// isCodeRetryable reports whether the error code is retryable, either
+// directly (retryableCodes) or via a credentials refresh (credsExpiredCodes).
+func isCodeRetryable(code string) bool {
+ if _, ok := retryableCodes[code]; ok {
+ return true
+ }
+
+ return isCodeExpiredCreds(code)
+}
+
+// isCodeExpiredCreds reports whether the error code signals expired credentials.
+func isCodeExpiredCreds(code string) bool {
+ _, ok := credsExpiredCodes[code]
+ return ok
+}
+
+// shouldRetry returns if the request should be retried: any 5xx status, or
+// an awserr.Error whose code is retryable / signals expired credentials.
+// NOTE(review): assumes r.HTTPResponse is non-nil when called — confirm
+// callers guarantee this (e.g. connection errors that never got a response).
+func shouldRetry(r *Request) bool {
+ if r.HTTPResponse.StatusCode >= 500 {
+ return true
+ }
+ if r.Error != nil {
+ if err, ok := r.Error.(awserr.Error); ok {
+ return isCodeRetryable(err.Code())
+ }
+ }
+ return false
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 0000000..7801cb6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,131 @@
+package aws
+
+import (
+ "fmt"
+ "io"
+ "time"
+)
+
+// The following helpers convert Go values to pointers, as the SDK's API
+// shapes use pointer fields to distinguish unset from zero values.
+
+// String converts a Go string into a string pointer.
+func String(v string) *string {
+ return &v
+}
+
+// Boolean converts a Go bool into a boolean pointer.
+func Boolean(v bool) *bool {
+ return &v
+}
+
+// Long converts a Go int64 into a long pointer.
+func Long(v int64) *int64 {
+ return &v
+}
+
+// Double converts a Go float64 into a double pointer.
+func Double(v float64) *float64 {
+ return &v
+}
+
+// Time converts a Go Time into a Time pointer.
+func Time(t time.Time) *time.Time {
+ return &t
+}
+
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+ return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// Read reads from the reader up to size of p. The number of bytes read, and
+// error if it occurred will be returned.
+//
+// If the reader is not an io.Reader zero bytes read, and nil error will be returned.
+//
+// Performs the same functionality as io.Reader Read
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+ // The type switch only fails to match when r.r is a nil interface,
+ // in which case (0, nil) is returned below.
+ switch t := r.r.(type) {
+ case io.Reader:
+ return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done
+// (offset 0 and nil error are returned).
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done
+// (nil error is returned).
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A SettableBool provides a boolean value which includes the state if
+// the value was set or unset. The set state is in addition to the value's
+// value(true|false)
+type SettableBool struct {
+ value bool
+ set bool
+}
+
+// SetBool returns a SettableBool with a value set
+func SetBool(value bool) SettableBool {
+ return SettableBool{value: value, set: true}
+}
+
+// Get returns the value. Will always be false if the SettableBool was not set.
+func (b *SettableBool) Get() bool {
+ if !b.set {
+ return false
+ }
+ return b.value
+}
+
+// Set sets the value and updates the state that the value has been set.
+func (b *SettableBool) Set(value bool) {
+ b.value = value
+ b.set = true
+}
+
+// IsSet returns if the value has been set
+func (b *SettableBool) IsSet() bool {
+ return b.set
+}
+
+// Reset resets the state and value of the SettableBool to its initial default
+// state of not set and zero value.
+func (b *SettableBool) Reset() {
+ b.value = false
+ b.set = false
+}
+
+// String returns the string representation of the value if set. Zero if not set.
+// (Delegates to Get, so an unset value prints as "false".)
+func (b *SettableBool) String() string {
+ return fmt.Sprintf("%t", b.Get())
+}
+
+// GoString returns the string representation of the SettableBool value and state
+// including whether it has been explicitly set.
+func (b *SettableBool) GoString() string {
+ return fmt.Sprintf("Bool{value:%t, set:%t}", b.value, b.set)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 0000000..94b8c72
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "0.6.4"
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go
new file mode 100644
index 0000000..d040ccc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go
@@ -0,0 +1,31 @@
+// Package endpoints validates regional endpoints for services.
+package endpoints
+
+//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
+//go:generate gofmt -s -w endpoints_map.go
+
+import "strings"
+
+// EndpointForRegion returns an endpoint and its signing region for a service and region.
+// if the service and region pair are not found endpoint and signingRegion will be empty.
+// Lookup proceeds from most to least specific key: "region/service",
+// "region/*", "*/service", then "*/*"; the first match wins, with
+// {region} and {service} placeholders substituted into the endpoint.
+func EndpointForRegion(svcName, region string) (endpoint, signingRegion string) {
+ derivedKeys := []string{
+ region + "/" + svcName,
+ region + "/*",
+ "*/" + svcName,
+ "*/*",
+ }
+
+ for _, key := range derivedKeys {
+ if val, ok := endpointsMap.Endpoints[key]; ok {
+ ep := val.Endpoint
+ ep = strings.Replace(ep, "{region}", region, -1)
+ ep = strings.Replace(ep, "{service}", svcName, -1)
+
+ endpoint = ep
+ signingRegion = val.SigningRegion
+ return
+ }
+ }
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json
new file mode 100644
index 0000000..4c58809
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json
@@ -0,0 +1,77 @@
+{
+ "version": 2,
+ "endpoints": {
+ "*/*": {
+ "endpoint": "{service}.{region}.amazonaws.com"
+ },
+ "cn-north-1/*": {
+ "endpoint": "{service}.{region}.amazonaws.com.cn",
+ "signatureVersion": "v4"
+ },
+ "us-gov-west-1/iam": {
+ "endpoint": "iam.us-gov.amazonaws.com"
+ },
+ "us-gov-west-1/sts": {
+ "endpoint": "sts.us-gov-west-1.amazonaws.com"
+ },
+ "us-gov-west-1/s3": {
+ "endpoint": "s3-{region}.amazonaws.com"
+ },
+ "*/cloudfront": {
+ "endpoint": "cloudfront.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/cloudsearchdomain": {
+ "endpoint": "",
+ "signingRegion": "us-east-1"
+ },
+ "*/iam": {
+ "endpoint": "iam.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/importexport": {
+ "endpoint": "importexport.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/route53": {
+ "endpoint": "route53.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/sts": {
+ "endpoint": "sts.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "us-east-1/sdb": {
+ "endpoint": "sdb.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "us-east-1/s3": {
+ "endpoint": "s3.amazonaws.com"
+ },
+ "us-west-1/s3": {
+ "endpoint": "s3-{region}.amazonaws.com"
+ },
+ "us-west-2/s3": {
+ "endpoint": "s3-{region}.amazonaws.com"
+ },
+ "eu-west-1/s3": {
+ "endpoint": "s3-{region}.amazonaws.com"
+ },
+ "ap-southeast-1/s3": {
+ "endpoint": "s3-{region}.amazonaws.com"
+ },
+ "ap-southeast-2/s3": {
+ "endpoint": "s3-{region}.amazonaws.com"
+ },
+ "ap-northeast-1/s3": {
+ "endpoint": "s3-{region}.amazonaws.com"
+ },
+ "sa-east-1/s3": {
+ "endpoint": "s3-{region}.amazonaws.com"
+ },
+ "eu-central-1/s3": {
+ "endpoint": "{service}.{region}.amazonaws.com",
+ "signatureVersion": "v4"
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go
new file mode 100644
index 0000000..894c1a6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go
@@ -0,0 +1,89 @@
+package endpoints
+
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+type endpointStruct struct {
+ Version int
+ Endpoints map[string]endpointEntry
+}
+
+type endpointEntry struct {
+ Endpoint string
+ SigningRegion string
+}
+
+var endpointsMap = endpointStruct{
+ Version: 2,
+ Endpoints: map[string]endpointEntry{
+ "*/*": {
+ Endpoint: "{service}.{region}.amazonaws.com",
+ },
+ "*/cloudfront": {
+ Endpoint: "cloudfront.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/cloudsearchdomain": {
+ Endpoint: "",
+ SigningRegion: "us-east-1",
+ },
+ "*/iam": {
+ Endpoint: "iam.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/importexport": {
+ Endpoint: "importexport.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/route53": {
+ Endpoint: "route53.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/sts": {
+ Endpoint: "sts.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "ap-northeast-1/s3": {
+ Endpoint: "s3-{region}.amazonaws.com",
+ },
+ "ap-southeast-1/s3": {
+ Endpoint: "s3-{region}.amazonaws.com",
+ },
+ "ap-southeast-2/s3": {
+ Endpoint: "s3-{region}.amazonaws.com",
+ },
+ "cn-north-1/*": {
+ Endpoint: "{service}.{region}.amazonaws.com.cn",
+ },
+ "eu-central-1/s3": {
+ Endpoint: "{service}.{region}.amazonaws.com",
+ },
+ "eu-west-1/s3": {
+ Endpoint: "s3-{region}.amazonaws.com",
+ },
+ "sa-east-1/s3": {
+ Endpoint: "s3-{region}.amazonaws.com",
+ },
+ "us-east-1/s3": {
+ Endpoint: "s3.amazonaws.com",
+ },
+ "us-east-1/sdb": {
+ Endpoint: "sdb.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "us-gov-west-1/iam": {
+ Endpoint: "iam.us-gov.amazonaws.com",
+ },
+ "us-gov-west-1/s3": {
+ Endpoint: "s3-{region}.amazonaws.com",
+ },
+ "us-gov-west-1/sts": {
+ Endpoint: "sts.us-gov-west-1.amazonaws.com",
+ },
+ "us-west-1/s3": {
+ Endpoint: "s3-{region}.amazonaws.com",
+ },
+ "us-west-2/s3": {
+ Endpoint: "s3-{region}.amazonaws.com",
+ },
+ },
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_test.go
new file mode 100644
index 0000000..8af6587
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_test.go
@@ -0,0 +1,28 @@
+package endpoints
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestGlobalEndpoints verifies that global (non-regional) services resolve
+// to their fixed endpoint and sign in us-east-1 regardless of the region.
+func TestGlobalEndpoints(t *testing.T) {
+ region := "mock-region-1"
+ svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts"}
+
+ for _, name := range svcs {
+ ep, sr := EndpointForRegion(name, region)
+ assert.Equal(t, name+".amazonaws.com", ep)
+ assert.Equal(t, "us-east-1", sr)
+ }
+}
+
+// TestServicesInCN verifies that cn-north-1 resolves every service through
+// the "cn-north-1/*" wildcard to the .com.cn endpoint.
+func TestServicesInCN(t *testing.T) {
+ region := "cn-north-1"
+ svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "s3"}
+
+ for _, name := range svcs {
+ ep, _ := EndpointForRegion(name, region)
+ assert.Equal(t, name+"."+region+".amazonaws.com.cn", ep)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go
new file mode 100644
index 0000000..c4d8dd2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go
@@ -0,0 +1,33 @@
+// Package query provides serialisation of AWS query requests, and responses.
+package query
+
+//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/query.json build_test.go
+
+import (
+ "net/url"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/internal/protocol/query"
+ "github.com/aws/aws-sdk-go/internal/protocol/query/queryutil"
+)
+
+// Build builds a request for an AWS Query service: the operation name,
+// API version, and flattened request parameters are encoded as
+// application/x-www-form-urlencoded values. Regular requests POST the
+// encoded body; pre-signed requests (ExpireTime != 0) carry it in the
+// GET query string instead.
+func Build(r *aws.Request) {
+ body := url.Values{
+ "Action": {r.Operation.Name},
+ "Version": {r.Service.APIVersion},
+ }
+ if err := queryutil.Parse(body, r.Params, false); err != nil {
+ r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
+ return
+ }
+
+ if r.ExpireTime == 0 {
+ r.HTTPRequest.Method = "POST"
+ r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+ r.SetBufferBody([]byte(body.Encode()))
+ } else { // This is a pre-signed request
+ r.HTTPRequest.Method = "GET"
+ r.HTTPRequest.URL.RawQuery = body.Encode()
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build_test.go
new file mode 100644
index 0000000..b548298
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build_test.go
@@ -0,0 +1,1491 @@
+package query_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/internal/protocol/query"
+ "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/internal/signer/v4"
+ "github.com/aws/aws-sdk-go/internal/util"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ bytes.Buffer // always import bytes
+var _ http.Request
+var _ json.Marshaler
+var _ time.Time
+var _ xmlutil.XMLNode
+var _ xml.Attr
+var _ = ioutil.Discard
+var _ = util.Trim("")
+var _ = url.Values{}
+var _ = io.EOF
+
+// InputService1ProtocolTest is a client for InputService1ProtocolTest.
+type InputService1ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService1ProtocolTest client.
+func NewInputService1ProtocolTest(config *aws.Config) *InputService1ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice1protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &InputService1ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService1ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService1TestCaseOperation1 = "OperationName"
+
+// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation.
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *aws.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService1TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService1TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService1TestShapeInputService1TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) {
+ req, out := c.InputService1TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService1TestShapeInputService1TestCaseOperation1Output struct {
+ metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService1TestShapeInputShape struct {
+ Bar *string `type:"string"`
+
+ Foo *string `type:"string"`
+
+ metadataInputService1TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService2ProtocolTest is a client for InputService2ProtocolTest.
+type InputService2ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService2ProtocolTest client.
+func NewInputService2ProtocolTest(config *aws.Config) *InputService2ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice2protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &InputService2ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService2ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService2TestCaseOperation1 = "OperationName"
+
+// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation.
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputShape) (req *aws.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService2TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService2TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService2TestShapeInputService2TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputShape) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) {
+ req, out := c.InputService2TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService2TestShapeInputService2TestCaseOperation1Output struct {
+ metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService2TestShapeInputShape struct {
+ StructArg *InputService2TestShapeStructType `type:"structure"`
+
+ metadataInputService2TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService2TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService2TestShapeStructType struct {
+ ScalarArg *string `type:"string"`
+
+ metadataInputService2TestShapeStructType `json:"-" xml:"-"`
+}
+
+type metadataInputService2TestShapeStructType struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService3ProtocolTest is a client for InputService3ProtocolTest.
+type InputService3ProtocolTest struct {
+ *aws.Service
+}
+
+// NewInputService3ProtocolTest returns a new InputService3ProtocolTest client.
+func NewInputService3ProtocolTest(config *aws.Config) *InputService3ProtocolTest {
+ svc := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice3protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ svc.Initialize()
+
+ // Install the query protocol marshalers and the v4 request signer.
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &InputService3ProtocolTest{svc}
+}
+
+// newRequest creates a new request for a InputService3ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ return aws.NewRequest(c.Service, op, params, data)
+}
+
+const opInputService3TestCaseOperation1 = "OperationName"
+
+// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *aws.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService3TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService3TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService3TestShapeInputService3TestCaseOperation1Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService3TestCaseOperation1 sends the request built by
+// InputService3TestCaseOperation1Request and returns its output.
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) {
+ req, out := c.InputService3TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService3TestCaseOperation2 = "OperationName"
+
+// InputService3TestCaseOperation2Request generates a request for the InputService3TestCaseOperation2 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *aws.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) {
+ op := &aws.Operation{
+ Name: opInputService3TestCaseOperation2,
+ }
+
+ if input == nil {
+ input = &InputService3TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService3TestShapeInputService3TestCaseOperation2Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService3TestCaseOperation2 sends the request built by
+// InputService3TestCaseOperation2Request and returns its output.
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) {
+ req, out := c.InputService3TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+// InputService3TestShapeInputService3TestCaseOperation1Output is the (empty)
+// output shape for InputService3TestCaseOperation1.
+type InputService3TestShapeInputService3TestCaseOperation1Output struct {
+ metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+// metadataInputService3TestShapeInputService3TestCaseOperation1Output carries the SDK shape traits.
+type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService3TestShapeInputService3TestCaseOperation2Output is the (empty)
+// output shape for InputService3TestCaseOperation2.
+type InputService3TestShapeInputService3TestCaseOperation2Output struct {
+ metadataInputService3TestShapeInputService3TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+// metadataInputService3TestShapeInputService3TestCaseOperation2Output carries the SDK shape traits.
+type metadataInputService3TestShapeInputService3TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService3TestShapeInputShape is the input shape: a single (non-flattened) list member.
+type InputService3TestShapeInputShape struct {
+ ListArg []*string `type:"list"`
+
+ metadataInputService3TestShapeInputShape `json:"-" xml:"-"`
+}
+
+// metadataInputService3TestShapeInputShape carries the SDK shape traits.
+type metadataInputService3TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService4ProtocolTest is a client for InputService4ProtocolTest.
+type InputService4ProtocolTest struct {
+ *aws.Service
+}
+
+// NewInputService4ProtocolTest returns a new InputService4ProtocolTest client.
+func NewInputService4ProtocolTest(config *aws.Config) *InputService4ProtocolTest {
+ svc := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice4protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ svc.Initialize()
+
+ // Install the query protocol marshalers and the v4 request signer.
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &InputService4ProtocolTest{svc}
+}
+
+// newRequest creates a new request for a InputService4ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ return aws.NewRequest(c.Service, op, params, data)
+}
+
+const opInputService4TestCaseOperation1 = "OperationName"
+
+// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *aws.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService4TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService4TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService4TestShapeInputService4TestCaseOperation1Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService4TestCaseOperation1 sends the request built by
+// InputService4TestCaseOperation1Request and returns its output.
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) {
+ req, out := c.InputService4TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService4TestCaseOperation2 = "OperationName"
+
+// InputService4TestCaseOperation2Request generates a request for the InputService4TestCaseOperation2 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation2Request(input *InputService4TestShapeInputShape) (req *aws.Request, output *InputService4TestShapeInputService4TestCaseOperation2Output) {
+ op := &aws.Operation{
+ Name: opInputService4TestCaseOperation2,
+ }
+
+ if input == nil {
+ input = &InputService4TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService4TestShapeInputService4TestCaseOperation2Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService4TestCaseOperation2 sends the request built by
+// InputService4TestCaseOperation2Request and returns its output.
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation2(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation2Output, error) {
+ req, out := c.InputService4TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+// InputService4TestShapeInputService4TestCaseOperation1Output is the (empty)
+// output shape for InputService4TestCaseOperation1.
+type InputService4TestShapeInputService4TestCaseOperation1Output struct {
+ metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+// metadataInputService4TestShapeInputService4TestCaseOperation1Output carries the SDK shape traits.
+type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService4TestShapeInputService4TestCaseOperation2Output is the (empty)
+// output shape for InputService4TestCaseOperation2.
+type InputService4TestShapeInputService4TestCaseOperation2Output struct {
+ metadataInputService4TestShapeInputService4TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+// metadataInputService4TestShapeInputService4TestCaseOperation2Output carries the SDK shape traits.
+type metadataInputService4TestShapeInputService4TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService4TestShapeInputShape is the input shape: a flattened list member
+// plus a scalar member.
+type InputService4TestShapeInputShape struct {
+ ListArg []*string `type:"list" flattened:"true"`
+
+ ScalarArg *string `type:"string"`
+
+ metadataInputService4TestShapeInputShape `json:"-" xml:"-"`
+}
+
+// metadataInputService4TestShapeInputShape carries the SDK shape traits.
+type metadataInputService4TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService5ProtocolTest is a client for InputService5ProtocolTest.
+type InputService5ProtocolTest struct {
+ *aws.Service
+}
+
+// NewInputService5ProtocolTest returns a new InputService5ProtocolTest client.
+func NewInputService5ProtocolTest(config *aws.Config) *InputService5ProtocolTest {
+ svc := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice5protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ svc.Initialize()
+
+ // Install the query protocol marshalers and the v4 request signer.
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &InputService5ProtocolTest{svc}
+}
+
+// newRequest creates a new request for a InputService5ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ return aws.NewRequest(c.Service, op, params, data)
+}
+
+const opInputService5TestCaseOperation1 = "OperationName"
+
+// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputShape) (req *aws.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService5TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService5TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService5TestShapeInputService5TestCaseOperation1Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService5TestCaseOperation1 sends the request built by
+// InputService5TestCaseOperation1Request and returns its output.
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) {
+ req, out := c.InputService5TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService5TestCaseOperation2 = "OperationName"
+
+// InputService5TestCaseOperation2Request generates a request for the InputService5TestCaseOperation2 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation2Request(input *InputService5TestShapeInputShape) (req *aws.Request, output *InputService5TestShapeInputService5TestCaseOperation2Output) {
+ op := &aws.Operation{
+ Name: opInputService5TestCaseOperation2,
+ }
+
+ if input == nil {
+ input = &InputService5TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService5TestShapeInputService5TestCaseOperation2Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService5TestCaseOperation2 sends the request built by
+// InputService5TestCaseOperation2Request and returns its output.
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation2(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation2Output, error) {
+ req, out := c.InputService5TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+// InputService5TestShapeInputService5TestCaseOperation1Output is the (empty)
+// output shape for InputService5TestCaseOperation1.
+type InputService5TestShapeInputService5TestCaseOperation1Output struct {
+ metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+// metadataInputService5TestShapeInputService5TestCaseOperation1Output carries the SDK shape traits.
+type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService5TestShapeInputService5TestCaseOperation2Output is the (empty)
+// output shape for InputService5TestCaseOperation2.
+type InputService5TestShapeInputService5TestCaseOperation2Output struct {
+ metadataInputService5TestShapeInputService5TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+// metadataInputService5TestShapeInputService5TestCaseOperation2Output carries the SDK shape traits.
+type metadataInputService5TestShapeInputService5TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService5TestShapeInputShape is the input shape: a single map member.
+type InputService5TestShapeInputShape struct {
+ MapArg map[string]*string `type:"map"`
+
+ metadataInputService5TestShapeInputShape `json:"-" xml:"-"`
+}
+
+// metadataInputService5TestShapeInputShape carries the SDK shape traits.
+type metadataInputService5TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService6ProtocolTest is a client for InputService6ProtocolTest.
+type InputService6ProtocolTest struct {
+ *aws.Service
+}
+
+// NewInputService6ProtocolTest returns a new InputService6ProtocolTest client.
+func NewInputService6ProtocolTest(config *aws.Config) *InputService6ProtocolTest {
+ svc := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice6protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ svc.Initialize()
+
+ // Install the query protocol marshalers and the v4 request signer.
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &InputService6ProtocolTest{svc}
+}
+
+// newRequest creates a new request for a InputService6ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ return aws.NewRequest(c.Service, op, params, data)
+}
+
+const opInputService6TestCaseOperation1 = "OperationName"
+
+// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputShape) (req *aws.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService6TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService6TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService6TestShapeInputService6TestCaseOperation1Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService6TestCaseOperation1 sends the request built by
+// InputService6TestCaseOperation1Request and returns its output.
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputShape) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) {
+ req, out := c.InputService6TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+// InputService6TestShapeInputService6TestCaseOperation1Output is the (empty)
+// output shape for InputService6TestCaseOperation1.
+type InputService6TestShapeInputService6TestCaseOperation1Output struct {
+ metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+// metadataInputService6TestShapeInputService6TestCaseOperation1Output carries the SDK shape traits.
+type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService6TestShapeInputShape is the input shape: a map member with
+// renamed key/value location names.
+type InputService6TestShapeInputShape struct {
+ MapArg map[string]*string `locationNameKey:"TheKey" locationNameValue:"TheValue" type:"map"`
+
+ metadataInputService6TestShapeInputShape `json:"-" xml:"-"`
+}
+
+// metadataInputService6TestShapeInputShape carries the SDK shape traits.
+type metadataInputService6TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService7ProtocolTest is a client for InputService7ProtocolTest.
+type InputService7ProtocolTest struct {
+ *aws.Service
+}
+
+// NewInputService7ProtocolTest returns a new InputService7ProtocolTest client.
+func NewInputService7ProtocolTest(config *aws.Config) *InputService7ProtocolTest {
+ svc := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice7protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ svc.Initialize()
+
+ // Install the query protocol marshalers and the v4 request signer.
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &InputService7ProtocolTest{svc}
+}
+
+// newRequest creates a new request for a InputService7ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ return aws.NewRequest(c.Service, op, params, data)
+}
+
+const opInputService7TestCaseOperation1 = "OperationName"
+
+// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputShape) (req *aws.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService7TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService7TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService7TestShapeInputService7TestCaseOperation1Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService7TestCaseOperation1 sends the request built by
+// InputService7TestCaseOperation1Request and returns its output.
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputShape) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) {
+ req, out := c.InputService7TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+// InputService7TestShapeInputService7TestCaseOperation1Output is the (empty)
+// output shape for InputService7TestCaseOperation1.
+type InputService7TestShapeInputService7TestCaseOperation1Output struct {
+ metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+// metadataInputService7TestShapeInputService7TestCaseOperation1Output carries the SDK shape traits.
+type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService7TestShapeInputShape is the input shape: a single blob member.
+type InputService7TestShapeInputShape struct {
+ BlobArg []byte `type:"blob"`
+
+ metadataInputService7TestShapeInputShape `json:"-" xml:"-"`
+}
+
+// metadataInputService7TestShapeInputShape carries the SDK shape traits.
+type metadataInputService7TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService8ProtocolTest is a client for InputService8ProtocolTest.
+type InputService8ProtocolTest struct {
+ *aws.Service
+}
+
+// NewInputService8ProtocolTest returns a new InputService8ProtocolTest client.
+func NewInputService8ProtocolTest(config *aws.Config) *InputService8ProtocolTest {
+ svc := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice8protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ svc.Initialize()
+
+ // Install the query protocol marshalers and the v4 request signer.
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &InputService8ProtocolTest{svc}
+}
+
+// newRequest creates a new request for a InputService8ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ return aws.NewRequest(c.Service, op, params, data)
+}
+
+const opInputService8TestCaseOperation1 = "OperationName"
+
+// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService8TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService8TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService8TestShapeInputService8TestCaseOperation1Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService8TestCaseOperation1 sends the request built by
+// InputService8TestCaseOperation1Request and returns its output.
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputShape) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) {
+ req, out := c.InputService8TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+// InputService8TestShapeInputService8TestCaseOperation1Output is the (empty)
+// output shape for InputService8TestCaseOperation1.
+type InputService8TestShapeInputService8TestCaseOperation1Output struct {
+ metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+// metadataInputService8TestShapeInputService8TestCaseOperation1Output carries the SDK shape traits.
+type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService8TestShapeInputShape is the input shape: a timestamp member
+// serialized in ISO-8601 format.
+type InputService8TestShapeInputShape struct {
+ TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ metadataInputService8TestShapeInputShape `json:"-" xml:"-"`
+}
+
+// metadataInputService8TestShapeInputShape carries the SDK shape traits.
+type metadataInputService8TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService9ProtocolTest is a client for InputService9ProtocolTest.
+type InputService9ProtocolTest struct {
+ *aws.Service
+}
+
+// NewInputService9ProtocolTest returns a new InputService9ProtocolTest client.
+func NewInputService9ProtocolTest(config *aws.Config) *InputService9ProtocolTest {
+ svc := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice9protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ svc.Initialize()
+
+ // Install the query protocol marshalers and the v4 request signer.
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &InputService9ProtocolTest{svc}
+}
+
+// newRequest creates a new request for a InputService9ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ return aws.NewRequest(c.Service, op, params, data)
+}
+
+const opInputService9TestCaseOperation1 = "OperationName"
+
+// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService9TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService9TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService9TestShapeInputService9TestCaseOperation1Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService9TestCaseOperation1 sends the request built by
+// InputService9TestCaseOperation1Request and returns its output.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) {
+ req, out := c.InputService9TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService9TestCaseOperation2 = "OperationName"
+
+// InputService9TestCaseOperation2Request generates a request for the InputService9TestCaseOperation2 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation2Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation2Output) {
+ op := &aws.Operation{
+ Name: opInputService9TestCaseOperation2,
+ }
+
+ if input == nil {
+ input = &InputService9TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService9TestShapeInputService9TestCaseOperation2Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService9TestCaseOperation2 sends the request built by
+// InputService9TestCaseOperation2Request and returns its output.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation2(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation2Output, error) {
+ req, out := c.InputService9TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService9TestCaseOperation3 = "OperationName"
+
+// InputService9TestCaseOperation3Request generates a request for the InputService9TestCaseOperation3 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation3Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation3Output) {
+ op := &aws.Operation{
+ Name: opInputService9TestCaseOperation3,
+ }
+
+ if input == nil {
+ input = &InputService9TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService9TestShapeInputService9TestCaseOperation3Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService9TestCaseOperation3 sends the request built by
+// InputService9TestCaseOperation3Request and returns its output.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation3(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation3Output, error) {
+ req, out := c.InputService9TestCaseOperation3Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService9TestCaseOperation4 = "OperationName"
+
+// InputService9TestCaseOperation4Request generates a request for the InputService9TestCaseOperation4 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation4Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation4Output) {
+ op := &aws.Operation{
+ Name: opInputService9TestCaseOperation4,
+ }
+
+ if input == nil {
+ input = &InputService9TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService9TestShapeInputService9TestCaseOperation4Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService9TestCaseOperation4 sends the request built by
+// InputService9TestCaseOperation4Request and returns its output.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation4(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation4Output, error) {
+ req, out := c.InputService9TestCaseOperation4Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService9TestCaseOperation5 = "OperationName"
+
+// InputService9TestCaseOperation5Request generates a request for the InputService9TestCaseOperation5 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation5Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation5Output) {
+ op := &aws.Operation{
+ Name: opInputService9TestCaseOperation5,
+ }
+
+ if input == nil {
+ input = &InputService9TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService9TestShapeInputService9TestCaseOperation5Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService9TestCaseOperation5 sends the request built by
+// InputService9TestCaseOperation5Request and returns its output.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation5(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation5Output, error) {
+ req, out := c.InputService9TestCaseOperation5Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService9TestCaseOperation6 = "OperationName"
+
+// InputService9TestCaseOperation6Request generates a request for the InputService9TestCaseOperation6 operation.
+// It returns the unsent request and the output value the request will unmarshal into.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation6Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation6Output) {
+ op := &aws.Operation{
+ Name: opInputService9TestCaseOperation6,
+ }
+
+ if input == nil {
+ input = &InputService9TestShapeInputShape{}
+ }
+
+ // Allocate the output before building the request so newRequest never
+ // receives a nil data value; previously output was still nil here and was
+ // only attached afterwards via req.Data.
+ output = &InputService9TestShapeInputService9TestCaseOperation6Output{}
+ req = c.newRequest(op, input, output)
+ req.Data = output
+ return
+}
+
+// InputService9TestCaseOperation6 sends the request built by
+// InputService9TestCaseOperation6Request and returns its output.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation6(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation6Output, error) {
+ req, out := c.InputService9TestCaseOperation6Request(input)
+ err := req.Send()
+ return out, err
+}
+
+// InputService9TestShapeInputService9TestCaseOperation1Output is the (empty)
+// output shape for InputService9TestCaseOperation1.
+type InputService9TestShapeInputService9TestCaseOperation1Output struct {
+ metadataInputService9TestShapeInputService9TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+// metadataInputService9TestShapeInputService9TestCaseOperation1Output carries the SDK shape traits.
+type metadataInputService9TestShapeInputService9TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService9TestShapeInputService9TestCaseOperation2Output is the (empty)
+// output shape for InputService9TestCaseOperation2.
+type InputService9TestShapeInputService9TestCaseOperation2Output struct {
+ metadataInputService9TestShapeInputService9TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+// metadataInputService9TestShapeInputService9TestCaseOperation2Output carries the SDK shape traits.
+type metadataInputService9TestShapeInputService9TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService9TestShapeInputService9TestCaseOperation3Output is the (empty)
+// output shape for InputService9TestCaseOperation3.
+type InputService9TestShapeInputService9TestCaseOperation3Output struct {
+ metadataInputService9TestShapeInputService9TestCaseOperation3Output `json:"-" xml:"-"`
+}
+
+// metadataInputService9TestShapeInputService9TestCaseOperation3Output carries the SDK shape traits.
+type metadataInputService9TestShapeInputService9TestCaseOperation3Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService9TestShapeInputService9TestCaseOperation4Output is the (empty)
+// output shape for InputService9TestCaseOperation4.
+type InputService9TestShapeInputService9TestCaseOperation4Output struct {
+ metadataInputService9TestShapeInputService9TestCaseOperation4Output `json:"-" xml:"-"`
+}
+
+// metadataInputService9TestShapeInputService9TestCaseOperation4Output carries the SDK shape traits.
+type metadataInputService9TestShapeInputService9TestCaseOperation4Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService9TestShapeInputService9TestCaseOperation5Output is the (empty)
+// output shape for InputService9TestCaseOperation5.
+type InputService9TestShapeInputService9TestCaseOperation5Output struct {
+ metadataInputService9TestShapeInputService9TestCaseOperation5Output `json:"-" xml:"-"`
+}
+
+// metadataInputService9TestShapeInputService9TestCaseOperation5Output carries the SDK shape traits.
+type metadataInputService9TestShapeInputService9TestCaseOperation5Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService9TestShapeInputService9TestCaseOperation6Output is the (empty)
+// output shape for InputService9TestCaseOperation6.
+type InputService9TestShapeInputService9TestCaseOperation6Output struct {
+ metadataInputService9TestShapeInputService9TestCaseOperation6Output `json:"-" xml:"-"`
+}
+
+// metadataInputService9TestShapeInputService9TestCaseOperation6Output carries the SDK shape traits.
+type metadataInputService9TestShapeInputService9TestCaseOperation6Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService9TestShapeInputShape is the shared input shape for all six
+// InputService9 operations: a single recursive structure member.
+type InputService9TestShapeInputShape struct {
+ RecursiveStruct *InputService9TestShapeRecursiveStructType `type:"structure"`
+
+ metadataInputService9TestShapeInputShape `json:"-" xml:"-"`
+}
+
+// metadataInputService9TestShapeInputShape carries the SDK shape traits.
+type metadataInputService9TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService9TestShapeRecursiveStructType is a self-referential shape: it can
+// nest itself directly, inside a list, or inside a map.
+type InputService9TestShapeRecursiveStructType struct {
+ NoRecurse *string `type:"string"`
+
+ RecursiveList []*InputService9TestShapeRecursiveStructType `type:"list"`
+
+ RecursiveMap map[string]*InputService9TestShapeRecursiveStructType `type:"map"`
+
+ RecursiveStruct *InputService9TestShapeRecursiveStructType `type:"structure"`
+
+ metadataInputService9TestShapeRecursiveStructType `json:"-" xml:"-"`
+}
+
+// metadataInputService9TestShapeRecursiveStructType carries the SDK shape traits.
+type metadataInputService9TestShapeRecursiveStructType struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//
+// Tests begin here
+//
+
+// TestInputService1ProtocolTestScalarMembersCase1 verifies query serialization
+// of two scalar string members.
+func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) {
+ svc := NewInputService1ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ params := &InputService1TestShapeInputShape{
+ Bar: aws.String("val2"),
+ Foo: aws.String("val1"),
+ }
+ req, _ := svc.InputService1TestCaseOperation1Request(params)
+ httpReq := req.HTTPRequest
+
+ // Run the query protocol marshaler and make sure it succeeded.
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // The serialized body must match the expected query string exactly.
+ assert.NotNil(t, httpReq.Body)
+ payload, _ := ioutil.ReadAll(httpReq.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`), util.Trim(string(payload)))
+
+ // The URL is the configured endpoint plus a trailing slash; no headers are asserted.
+ assert.Equal(t, "https://test/", httpReq.URL.String())
+}
+
+// TestInputService2ProtocolTestNestedStructureMembersCase1 verifies query
+// serialization of a nested structure member using dotted key names.
+func TestInputService2ProtocolTestNestedStructureMembersCase1(t *testing.T) {
+ svc := NewInputService2ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ params := &InputService2TestShapeInputShape{
+ StructArg: &InputService2TestShapeStructType{
+ ScalarArg: aws.String("foo"),
+ },
+ }
+ req, _ := svc.InputService2TestCaseOperation1Request(params)
+ httpReq := req.HTTPRequest
+
+ // Run the query protocol marshaler and make sure it succeeded.
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // The serialized body must match the expected query string exactly.
+ assert.NotNil(t, httpReq.Body)
+ payload, _ := ioutil.ReadAll(httpReq.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&StructArg.ScalarArg=foo&Version=2014-01-01`), util.Trim(string(payload)))
+
+ // The URL is the configured endpoint plus a trailing slash; no headers are asserted.
+ assert.Equal(t, "https://test/", httpReq.URL.String())
+}
+
+// TestInputService3ProtocolTestListTypesCase1 verifies query serialization of
+// a non-flattened list (ListArg.member.N keys).
+func TestInputService3ProtocolTestListTypesCase1(t *testing.T) {
+ svc := NewInputService3ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ params := &InputService3TestShapeInputShape{
+ ListArg: []*string{
+ aws.String("foo"),
+ aws.String("bar"),
+ aws.String("baz"),
+ },
+ }
+ req, _ := svc.InputService3TestCaseOperation1Request(params)
+ httpReq := req.HTTPRequest
+
+ // Run the query protocol marshaler and make sure it succeeded.
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // The serialized body must match the expected query string exactly.
+ assert.NotNil(t, httpReq.Body)
+ payload, _ := ioutil.ReadAll(httpReq.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&ListArg.member.1=foo&ListArg.member.2=bar&ListArg.member.3=baz&Version=2014-01-01`), util.Trim(string(payload)))
+
+ // The URL is the configured endpoint plus a trailing slash; no headers are asserted.
+ assert.Equal(t, "https://test/", httpReq.URL.String())
+}
+
+// TestInputService3ProtocolTestListTypesCase2 verifies that an empty list is
+// serialized as a single empty-valued key.
+func TestInputService3ProtocolTestListTypesCase2(t *testing.T) {
+ svc := NewInputService3ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ params := &InputService3TestShapeInputShape{
+ ListArg: []*string{},
+ }
+ req, _ := svc.InputService3TestCaseOperation2Request(params)
+ httpReq := req.HTTPRequest
+
+ // Run the query protocol marshaler and make sure it succeeded.
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // The serialized body must match the expected query string exactly.
+ assert.NotNil(t, httpReq.Body)
+ payload, _ := ioutil.ReadAll(httpReq.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&ListArg=&Version=2014-01-01`), util.Trim(string(payload)))
+
+ // The URL is the configured endpoint plus a trailing slash; no headers are asserted.
+ assert.Equal(t, "https://test/", httpReq.URL.String())
+}
+
+// TestInputService4ProtocolTestFlattenedListCase1 verifies query serialization
+// of a flattened list (ListArg.N keys, no "member" segment).
+func TestInputService4ProtocolTestFlattenedListCase1(t *testing.T) {
+ svc := NewInputService4ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ params := &InputService4TestShapeInputShape{
+ ListArg: []*string{
+ aws.String("a"),
+ aws.String("b"),
+ aws.String("c"),
+ },
+ ScalarArg: aws.String("foo"),
+ }
+ req, _ := svc.InputService4TestCaseOperation1Request(params)
+ httpReq := req.HTTPRequest
+
+ // Run the query protocol marshaler and make sure it succeeded.
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // The serialized body must match the expected query string exactly.
+ assert.NotNil(t, httpReq.Body)
+ payload, _ := ioutil.ReadAll(httpReq.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&ListArg.1=a&ListArg.2=b&ListArg.3=c&ScalarArg=foo&Version=2014-01-01`), util.Trim(string(payload)))
+
+ // The URL is the configured endpoint plus a trailing slash; no headers are asserted.
+ assert.Equal(t, "https://test/", httpReq.URL.String())
+}
+
+// TestInputService4ProtocolTestFlattenedListCase2 verifies that an empty
+// flattened list serializes as a single empty-valued key alongside the scalar.
+func TestInputService4ProtocolTestFlattenedListCase2(t *testing.T) {
+ svc := NewInputService4ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ params := &InputService4TestShapeInputShape{
+ ListArg: []*string{},
+ ScalarArg: aws.String("foo"),
+ }
+ req, _ := svc.InputService4TestCaseOperation2Request(params)
+ httpReq := req.HTTPRequest
+
+ // Run the query protocol marshaler and make sure it succeeded.
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // The serialized body must match the expected query string exactly.
+ assert.NotNil(t, httpReq.Body)
+ payload, _ := ioutil.ReadAll(httpReq.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&ListArg=&ScalarArg=foo&Version=2014-01-01`), util.Trim(string(payload)))
+
+ // The URL is the configured endpoint plus a trailing slash; no headers are asserted.
+ assert.Equal(t, "https://test/", httpReq.URL.String())
+}
+
+func TestInputService5ProtocolTestSerializeMapTypeCase1(t *testing.T) {
+ svc := NewInputService5ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService5TestShapeInputShape{
+ MapArg: map[string]*string{
+ "key1": aws.String("val1"),
+ "key2": aws.String("val2"),
+ },
+ }
+ req, _ := svc.InputService5TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&MapArg.entry.1.key=key1&MapArg.entry.1.value=val1&MapArg.entry.2.key=key2&MapArg.entry.2.value=val2&Version=2014-01-01`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService5ProtocolTestSerializeMapTypeCase2(t *testing.T) {
+ svc := NewInputService5ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService5TestShapeInputShape{
+ MapArg: map[string]*string{},
+ }
+ req, _ := svc.InputService5TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&MapArg=&Version=2014-01-01`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService6ProtocolTestSerializeMapTypeWithLocationNameCase1(t *testing.T) {
+ svc := NewInputService6ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService6TestShapeInputShape{
+ MapArg: map[string]*string{
+ "key1": aws.String("val1"),
+ "key2": aws.String("val2"),
+ },
+ }
+ req, _ := svc.InputService6TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&MapArg.entry.1.TheKey=key1&MapArg.entry.1.TheValue=val1&MapArg.entry.2.TheKey=key2&MapArg.entry.2.TheValue=val2&Version=2014-01-01`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService7ProtocolTestBase64EncodedBlobsCase1(t *testing.T) {
+ svc := NewInputService7ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService7TestShapeInputShape{
+ BlobArg: []byte("foo"),
+ }
+ req, _ := svc.InputService7TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) {
+ svc := NewInputService8ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService8TestShapeInputShape{
+ TimeArg: aws.Time(time.Unix(1422172800, 0)),
+ }
+ req, _ := svc.InputService8TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService9ProtocolTestRecursiveShapesCase1(t *testing.T) {
+ svc := NewInputService9ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService9TestShapeInputShape{
+ RecursiveStruct: &InputService9TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ }
+ req, _ := svc.InputService9TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService9ProtocolTestRecursiveShapesCase2(t *testing.T) {
+ svc := NewInputService9ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService9TestShapeInputShape{
+ RecursiveStruct: &InputService9TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService9TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ },
+ }
+ req, _ := svc.InputService9TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService9ProtocolTestRecursiveShapesCase3(t *testing.T) {
+ svc := NewInputService9ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService9TestShapeInputShape{
+ RecursiveStruct: &InputService9TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService9TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService9TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService9TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService9TestCaseOperation3Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveStruct.RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService9ProtocolTestRecursiveShapesCase4(t *testing.T) {
+ svc := NewInputService9ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService9TestShapeInputShape{
+ RecursiveStruct: &InputService9TestShapeRecursiveStructType{
+ RecursiveList: []*InputService9TestShapeRecursiveStructType{
+ {
+ NoRecurse: aws.String("foo"),
+ },
+ {
+ NoRecurse: aws.String("bar"),
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService9TestCaseOperation4Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.NoRecurse=bar&Version=2014-01-01`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService9ProtocolTestRecursiveShapesCase5(t *testing.T) {
+ svc := NewInputService9ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService9TestShapeInputShape{
+ RecursiveStruct: &InputService9TestShapeRecursiveStructType{
+ RecursiveList: []*InputService9TestShapeRecursiveStructType{
+ {
+ NoRecurse: aws.String("foo"),
+ },
+ {
+ RecursiveStruct: &InputService9TestShapeRecursiveStructType{
+ NoRecurse: aws.String("bar"),
+ },
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService9TestCaseOperation5Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.RecursiveStruct.NoRecurse=bar&Version=2014-01-01`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService9ProtocolTestRecursiveShapesCase6(t *testing.T) {
+ svc := NewInputService9ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService9TestShapeInputShape{
+ RecursiveStruct: &InputService9TestShapeRecursiveStructType{
+ RecursiveMap: map[string]*InputService9TestShapeRecursiveStructType{
+ "bar": {
+ NoRecurse: aws.String("bar"),
+ },
+ "foo": {
+ NoRecurse: aws.String("foo"),
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService9TestCaseOperation6Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveMap.entry.1.key=bar&RecursiveStruct.RecursiveMap.entry.1.value.NoRecurse=bar&RecursiveStruct.RecursiveMap.entry.2.key=foo&RecursiveStruct.RecursiveMap.entry.2.value.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go
new file mode 100644
index 0000000..3b417a8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go
@@ -0,0 +1,223 @@
+package queryutil
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/url"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
// Parse parses an object i and fills a url.Values object. The isEC2 flag
// indicates if this is the EC2 Query sub-protocol (which alters name
// casing and flattening rules in the per-type serializers).
func Parse(body url.Values, i interface{}, isEC2 bool) error {
	q := queryParser{isEC2: isEC2}
	// Top-level call: no key prefix and no struct tag yet.
	return q.parseValue(body, reflect.ValueOf(i), "", "")
}
+
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
// queryParser serializes Go values into AWS Query protocol request
// parameters via reflection.
type queryParser struct {
	isEC2 bool // true when serializing for the EC2 variant of the Query protocol
}
+
// parseValue serializes a single value into v under the key prefix.
// The "type" struct tag of the field that produced the value selects the
// wire shape; when absent, the shape is inferred from the value's
// reflect.Kind. Structures, lists and maps recurse into the dedicated
// serializers; everything else is treated as a scalar.
func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
	value = elemOf(value)

	// no need to handle zero values
	if !value.IsValid() {
		return nil
	}

	t := tag.Get("type")
	if t == "" {
		// No explicit type tag: infer the wire shape from the Go kind.
		switch value.Kind() {
		case reflect.Struct:
			t = "structure"
		case reflect.Slice:
			t = "list"
		case reflect.Map:
			t = "map"
		}
	}

	switch t {
	case "structure":
		return q.parseStruct(v, value, prefix)
	case "list":
		return q.parseList(v, value, prefix, tag)
	case "map":
		return q.parseMap(v, value, prefix, tag)
	default:
		return q.parseScalar(v, value, prefix, tag)
	}
}
+
// parseStruct serializes every exported field of a struct into v,
// joining each field's wire name onto prefix with a ".". The wire name
// is resolved in priority order: EC2's "queryName" tag, then
// "locationNameList" (only for flattened lists), then "locationName",
// finally the Go field name. For EC2, location names additionally get
// their first letter upper-cased.
func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
	if !value.IsValid() {
		return nil
	}

	t := value.Type()
	for i := 0; i < value.NumField(); i++ {
		if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c {
			continue // ignore unexported fields
		}

		value := elemOf(value.Field(i))
		field := t.Field(i)
		var name string

		if q.isEC2 {
			name = field.Tag.Get("queryName")
		}
		if name == "" {
			if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
				name = field.Tag.Get("locationNameList")
			} else if locName := field.Tag.Get("locationName"); locName != "" {
				name = locName
			}
			if name != "" && q.isEC2 {
				// EC2 expects location-derived names to start upper-cased.
				name = strings.ToUpper(name[0:1]) + name[1:]
			}
		}
		if name == "" {
			name = field.Name
		}

		if prefix != "" {
			name = prefix + "." + name
		}

		if err := q.parseValue(v, value, name, field.Tag); err != nil {
			return err
		}
	}
	return nil
}
+
// parseList serializes a slice into v using 1-based member indexes
// (e.g. "Prefix.member.1"). A non-nil empty slice is encoded as a single
// empty value under prefix. Unless the field is flattened (or this is
// EC2), ".member" is inserted between the prefix and the index.
func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
	// If it's empty, generate an empty value
	if !value.IsNil() && value.Len() == 0 {
		v.Set(prefix, "")
		return nil
	}

	// check for unflattened list member
	if !q.isEC2 && tag.Get("flattened") == "" {
		prefix += ".member"
	}

	for i := 0; i < value.Len(); i++ {
		slicePrefix := prefix
		if slicePrefix == "" {
			// No prefix at all: the index alone becomes the key.
			slicePrefix = strconv.Itoa(i + 1)
		} else {
			slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
		}
		if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
			return err
		}
	}
	return nil
}
+
// parseMap serializes a map into v as numbered key/value entries
// (e.g. "Prefix.entry.1.key" / "Prefix.entry.1.value"). A non-nil empty
// map is encoded as a single empty value under prefix. Unless the field
// is flattened (or this is EC2), ".entry" is inserted before the index.
// The "key"/"value" sub-names can be overridden with the
// "locationNameKey"/"locationNameValue" tags.
func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
	// If it's empty, generate an empty value
	if !value.IsNil() && value.Len() == 0 {
		v.Set(prefix, "")
		return nil
	}

	// check for unflattened list member
	if !q.isEC2 && tag.Get("flattened") == "" {
		prefix += ".entry"
	}

	// sort keys for improved serialization consistency.
	// this is not strictly necessary for protocol support.
	mapKeyValues := value.MapKeys()
	mapKeys := map[string]reflect.Value{}
	mapKeyNames := make([]string, len(mapKeyValues))
	for i, mapKey := range mapKeyValues {
		name := mapKey.String()
		mapKeys[name] = mapKey
		mapKeyNames[i] = name
	}
	sort.Strings(mapKeyNames)

	for i, mapKeyName := range mapKeyNames {
		mapKey := mapKeys[mapKeyName]
		mapValue := value.MapIndex(mapKey)

		// Sub-names for the map key and value, overridable via tags.
		kname := tag.Get("locationNameKey")
		if kname == "" {
			kname = "key"
		}
		vname := tag.Get("locationNameValue")
		if vname == "" {
			vname = "value"
		}

		// serialize key
		var keyName string
		if prefix == "" {
			keyName = strconv.Itoa(i+1) + "." + kname
		} else {
			keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
		}

		if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
			return err
		}

		// serialize value
		var valueName string
		if prefix == "" {
			valueName = strconv.Itoa(i+1) + "." + vname
		} else {
			valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
		}

		if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
			return err
		}
	}

	return nil
}
+
+func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := r.Interface().(type) {
+ case string:
+ v.Set(name, value)
+ case []byte:
+ if !r.IsNil() {
+ v.Set(name, base64.StdEncoding.EncodeToString(value))
+ }
+ case bool:
+ v.Set(name, strconv.FormatBool(value))
+ case int64:
+ v.Set(name, strconv.FormatInt(value, 10))
+ case int:
+ v.Set(name, strconv.Itoa(value))
+ case float64:
+ v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+ case float32:
+ v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ v.Set(name, value.UTC().Format(ISO8601UTC))
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go
new file mode 100644
index 0000000..e8cfa92
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go
@@ -0,0 +1,29 @@
+package query
+
+//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/query.json unmarshal_test.go
+
+import (
+ "encoding/xml"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
+)
+
// Unmarshal unmarshals a response for an AWS Query service.
// When the request expects output data, the response body is decoded as
// XML into r.Data, rooted at the "<OperationName>Result" element; decode
// failures are reported on r.Error as a SerializationError. The body is
// always closed, even when no data is expected.
func Unmarshal(r *aws.Request) {
	defer r.HTTPResponse.Body.Close()
	if r.DataFilled() {
		decoder := xml.NewDecoder(r.HTTPResponse.Body)
		err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
		if err != nil {
			r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
			return
		}
	}
}
+
// UnmarshalMeta unmarshals header response values for an AWS Query service.
// Currently a no-op placeholder in the handler chain.
func UnmarshalMeta(r *aws.Request) {
	// TODO implement unmarshaling of request IDs
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go
new file mode 100644
index 0000000..d88ee33
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go
@@ -0,0 +1,33 @@
+package query
+
+import (
+ "encoding/xml"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
// xmlErrorResponse mirrors the standard Query protocol error envelope:
//
//	<ErrorResponse><Error><Code/><Message/></Error><RequestId/></ErrorResponse>
type xmlErrorResponse struct {
	XMLName   xml.Name `xml:"ErrorResponse"`
	Code      string   `xml:"Error>Code"`
	Message   string   `xml:"Error>Message"`
	RequestID string   `xml:"RequestId"`
}
+
// UnmarshalError unmarshals an error response for an AWS Query service.
// On a decodable body, r.Error carries the service's error code and
// message wrapped with the HTTP status code and request ID. An empty
// body (io.EOF from the decoder) is tolerated and yields a request
// failure with empty code and message rather than a SerializationError.
func UnmarshalError(r *aws.Request) {
	defer r.HTTPResponse.Body.Close()

	resp := &xmlErrorResponse{}
	err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
	if err != nil && err != io.EOF {
		r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err)
	} else {
		r.Error = awserr.NewRequestFailure(
			awserr.New(resp.Code, resp.Message, nil),
			r.HTTPResponse.StatusCode,
			resp.RequestID,
		)
	}
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_test.go
new file mode 100644
index 0000000..924d3d4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_test.go
@@ -0,0 +1,1432 @@
+package query_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/internal/protocol/query"
+ "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/internal/signer/v4"
+ "github.com/aws/aws-sdk-go/internal/util"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ bytes.Buffer // always import bytes
+var _ http.Request
+var _ json.Marshaler
+var _ time.Time
+var _ xmlutil.XMLNode
+var _ xml.Attr
+var _ = ioutil.Discard
+var _ = util.Trim("")
+var _ = url.Values{}
+var _ = io.EOF
+
+// OutputService1ProtocolTest is a client for OutputService1ProtocolTest.
+type OutputService1ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService1ProtocolTest client.
+func NewOutputService1ProtocolTest(config *aws.Config) *OutputService1ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice1protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService1ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService1TestCaseOperation1 = "OperationName"
+
+// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation.
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *aws.Request, output *OutputService1TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService1TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService1TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputShape, error) {
+ req, out := c.OutputService1TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService1TestShapeOutputService1TestCaseOperation1Input struct {
+ metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService1TestShapeOutputShape struct {
+ Char *string `type:"character"`
+
+ Double *float64 `type:"double"`
+
+ FalseBool *bool `type:"boolean"`
+
+ Float *float64 `type:"float"`
+
+ Long *int64 `type:"long"`
+
+ Num *int64 `locationName:"FooNum" type:"integer"`
+
+ Str *string `type:"string"`
+
+ Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ TrueBool *bool `type:"boolean"`
+
+ metadataOutputService1TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService1TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService2ProtocolTest is a client for OutputService2ProtocolTest.
+type OutputService2ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService2ProtocolTest client.
+func NewOutputService2ProtocolTest(config *aws.Config) *OutputService2ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice2protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService2ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService2TestCaseOperation1 = "OperationName"
+
+// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation.
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *aws.Request, output *OutputService2TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService2TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService2TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputShape, error) {
+ req, out := c.OutputService2TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService2TestShapeOutputService2TestCaseOperation1Input struct {
+ metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService2TestShapeOutputShape struct {
+ Num *int64 `type:"integer"`
+
+ Str *string `type:"string"`
+
+ metadataOutputService2TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService2TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService3ProtocolTest is a client for OutputService3ProtocolTest.
+type OutputService3ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService3ProtocolTest client.
+func NewOutputService3ProtocolTest(config *aws.Config) *OutputService3ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice3protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService3ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService3TestCaseOperation1 = "OperationName"
+
+// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation.
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *aws.Request, output *OutputService3TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService3TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService3TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputShape, error) {
+ req, out := c.OutputService3TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService3TestShapeOutputService3TestCaseOperation1Input struct {
+ metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService3TestShapeOutputShape struct {
+ Blob []byte `type:"blob"`
+
+ metadataOutputService3TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService3TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService4ProtocolTest is a client for OutputService4ProtocolTest.
+type OutputService4ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService4ProtocolTest client.
+func NewOutputService4ProtocolTest(config *aws.Config) *OutputService4ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice4protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService4ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService4TestCaseOperation1 = "OperationName"
+
+// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation.
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *aws.Request, output *OutputService4TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService4TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService4TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputShape, error) {
+ req, out := c.OutputService4TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService4TestShapeOutputService4TestCaseOperation1Input struct {
+ metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService4TestShapeOutputShape struct {
+ ListMember []*string `type:"list"`
+
+ metadataOutputService4TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService4TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService5ProtocolTest is a client for OutputService5ProtocolTest.
+type OutputService5ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService5ProtocolTest client.
+func NewOutputService5ProtocolTest(config *aws.Config) *OutputService5ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice5protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService5ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService5TestCaseOperation1 = "OperationName"
+
+// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation.
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *aws.Request, output *OutputService5TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService5TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService5TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputShape, error) {
+ req, out := c.OutputService5TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService5TestShapeOutputService5TestCaseOperation1Input struct {
+ metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService5TestShapeOutputShape struct {
+ ListMember []*string `locationNameList:"item" type:"list"`
+
+ metadataOutputService5TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService5TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService6ProtocolTest is a client for OutputService6ProtocolTest.
+type OutputService6ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService6ProtocolTest client.
+func NewOutputService6ProtocolTest(config *aws.Config) *OutputService6ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice6protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService6ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService6TestCaseOperation1 = "OperationName"
+
+// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation.
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *aws.Request, output *OutputService6TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService6TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService6TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputShape, error) {
+ req, out := c.OutputService6TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService6TestShapeOutputService6TestCaseOperation1Input struct {
+ metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService6TestShapeOutputShape struct {
+ ListMember []*string `type:"list" flattened:"true"`
+
+ metadataOutputService6TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService7ProtocolTest is a client for OutputService7ProtocolTest.
+type OutputService7ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService7ProtocolTest client.
+func NewOutputService7ProtocolTest(config *aws.Config) *OutputService7ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice7protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService7ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService7TestCaseOperation1 = "OperationName"
+
+// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation.
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *aws.Request, output *OutputService7TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService7TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService7TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputShape, error) {
+ req, out := c.OutputService7TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService7TestShapeOutputService7TestCaseOperation1Input struct {
+ metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService7TestShapeOutputShape struct {
+ ListMember []*string `type:"list" flattened:"true"`
+
+ metadataOutputService7TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService7TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService8ProtocolTest is a client for OutputService8ProtocolTest.
+type OutputService8ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService8ProtocolTest client.
+func NewOutputService8ProtocolTest(config *aws.Config) *OutputService8ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice8protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService8ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService8TestCaseOperation1 = "OperationName"
+
+// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation.
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *aws.Request, output *OutputService8TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService8TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService8TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputShape, error) {
+ req, out := c.OutputService8TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService8TestShapeOutputService8TestCaseOperation1Input struct {
+ metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService8TestShapeOutputShape struct {
+ List []*OutputService8TestShapeStructureShape `type:"list"`
+
+ metadataOutputService8TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService8TestShapeStructureShape struct {
+ Bar *string `type:"string"`
+
+ Baz *string `type:"string"`
+
+ Foo *string `type:"string"`
+
+ metadataOutputService8TestShapeStructureShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeStructureShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService9ProtocolTest is a client for OutputService9ProtocolTest.
+type OutputService9ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService9ProtocolTest client.
+func NewOutputService9ProtocolTest(config *aws.Config) *OutputService9ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice9protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService9ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService9TestCaseOperation1 = "OperationName"
+
+// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation.
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *aws.Request, output *OutputService9TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService9TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService9TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputShape, error) {
+ req, out := c.OutputService9TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService9TestShapeOutputService9TestCaseOperation1Input struct {
+ metadataOutputService9TestShapeOutputService9TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeOutputService9TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService9TestShapeOutputShape struct {
+ List []*OutputService9TestShapeStructureShape `type:"list" flattened:"true"`
+
+ metadataOutputService9TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService9TestShapeStructureShape struct {
+ Bar *string `type:"string"`
+
+ Baz *string `type:"string"`
+
+ Foo *string `type:"string"`
+
+ metadataOutputService9TestShapeStructureShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeStructureShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService10ProtocolTest is a client for OutputService10ProtocolTest.
+type OutputService10ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService10ProtocolTest client.
+func NewOutputService10ProtocolTest(config *aws.Config) *OutputService10ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice10protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService10ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService10TestCaseOperation1 = "OperationName"
+
+// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation.
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *aws.Request, output *OutputService10TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService10TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService10TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputShape, error) {
+ req, out := c.OutputService10TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService10TestShapeOutputService10TestCaseOperation1Input struct {
+ metadataOutputService10TestShapeOutputService10TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService10TestShapeOutputService10TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService10TestShapeOutputShape struct {
+ List []*string `locationNameList:"NamedList" type:"list" flattened:"true"`
+
+ metadataOutputService10TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService10TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService11ProtocolTest is a client for OutputService11ProtocolTest.
+type OutputService11ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService11ProtocolTest client.
+func NewOutputService11ProtocolTest(config *aws.Config) *OutputService11ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice11protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService11ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService11TestCaseOperation1 = "OperationName"
+
+// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation.
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *aws.Request, output *OutputService11TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService11TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService11TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputShape, error) {
+ req, out := c.OutputService11TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService11TestShapeOutputService11TestCaseOperation1Input struct {
+ metadataOutputService11TestShapeOutputService11TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService11TestShapeOutputService11TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService11TestShapeOutputShape struct {
+ Map map[string]*OutputService11TestShapeStructType `type:"map"`
+
+ metadataOutputService11TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService11TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService11TestShapeStructType struct {
+ Foo *string `locationName:"foo" type:"string"`
+
+ metadataOutputService11TestShapeStructType `json:"-" xml:"-"`
+}
+
+type metadataOutputService11TestShapeStructType struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService12ProtocolTest is a client for OutputService12ProtocolTest.
+type OutputService12ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService12ProtocolTest client.
+func NewOutputService12ProtocolTest(config *aws.Config) *OutputService12ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice12protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService12ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService12ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService12TestCaseOperation1 = "OperationName"
+
+// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation.
+func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *aws.Request, output *OutputService12TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService12TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService12TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputShape, error) {
+ req, out := c.OutputService12TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService12TestShapeOutputService12TestCaseOperation1Input struct {
+ metadataOutputService12TestShapeOutputService12TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService12TestShapeOutputService12TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService12TestShapeOutputShape struct {
+ Map map[string]*string `type:"map" flattened:"true"`
+
+ metadataOutputService12TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService12TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService13ProtocolTest is a client for OutputService13ProtocolTest.
+type OutputService13ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService13ProtocolTest client.
+func NewOutputService13ProtocolTest(config *aws.Config) *OutputService13ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice13protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService13ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService13ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService13ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService13TestCaseOperation1 = "OperationName"
+
+// OutputService13TestCaseOperation1Request generates a request for the OutputService13TestCaseOperation1 operation.
+func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1Request(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (req *aws.Request, output *OutputService13TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService13TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService13TestShapeOutputService13TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService13TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (*OutputService13TestShapeOutputShape, error) {
+ req, out := c.OutputService13TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService13TestShapeOutputService13TestCaseOperation1Input struct {
+ metadataOutputService13TestShapeOutputService13TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService13TestShapeOutputService13TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService13TestShapeOutputShape struct {
+ Map map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"`
+
+ metadataOutputService13TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService13TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService14ProtocolTest is a client for OutputService14ProtocolTest.
+type OutputService14ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService14ProtocolTest client.
+func NewOutputService14ProtocolTest(config *aws.Config) *OutputService14ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice14protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(query.Build)
+ service.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return &OutputService14ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService14ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService14ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService14TestCaseOperation1 = "OperationName"
+
+// OutputService14TestCaseOperation1Request generates a request for the OutputService14TestCaseOperation1 operation.
+func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1Request(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (req *aws.Request, output *OutputService14TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService14TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService14TestShapeOutputService14TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService14TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (*OutputService14TestShapeOutputShape, error) {
+ req, out := c.OutputService14TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService14TestShapeOutputService14TestCaseOperation1Input struct {
+ metadataOutputService14TestShapeOutputService14TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService14TestShapeOutputService14TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService14TestShapeOutputShape struct {
+ Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"`
+
+ metadataOutputService14TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService14TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//
+// Tests begin here
+//
+
+func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
+ svc := NewOutputService1ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><Str>myname</Str><FooNum>123</FooNum><FalseBool>false</FalseBool><TrueBool>true</TrueBool><Float>1.2</Float><Double>1.3</Double><Long>200</Long><Char>a</Char><Timestamp>2015-01-25T08:00:00Z</Timestamp></OperationNameResult><ResponseMetadata><RequestId>request-id</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService1TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "a", *out.Char)
+ assert.Equal(t, 1.3, *out.Double)
+ assert.Equal(t, false, *out.FalseBool)
+ assert.Equal(t, 1.2, *out.Float)
+ assert.Equal(t, int64(200), *out.Long)
+ assert.Equal(t, int64(123), *out.Num)
+ assert.Equal(t, "myname", *out.Str)
+ assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
+ assert.Equal(t, true, *out.TrueBool)
+
+}
+
+func TestOutputService2ProtocolTestNotAllMembersInResponseCase1(t *testing.T) {
+ svc := NewOutputService2ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><Str>myname</Str></OperationNameResult><ResponseMetadata><RequestId>request-id</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService2TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "myname", *out.Str)
+
+}
+
+func TestOutputService3ProtocolTestBlobCase1(t *testing.T) {
+ svc := NewOutputService3ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><Blob>dmFsdWU=</Blob></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService3TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "value", string(out.Blob))
+
+}
+
+func TestOutputService4ProtocolTestListsCase1(t *testing.T) {
+ svc := NewOutputService4ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><ListMember><member>abc</member><member>123</member></ListMember></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService4TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService5ProtocolTestListWithCustomMemberNameCase1(t *testing.T) {
+ svc := NewOutputService5ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><ListMember><item>abc</item><item>123</item></ListMember></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService5TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService6ProtocolTestFlattenedListCase1(t *testing.T) {
+ svc := NewOutputService6ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><ListMember>abc</ListMember><ListMember>123</ListMember></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService6TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService7ProtocolTestFlattenedSingleElementListCase1(t *testing.T) {
+ svc := NewOutputService7ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><ListMember>abc</ListMember></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService7TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+
+}
+
+func TestOutputService8ProtocolTestListOfStructuresCase1(t *testing.T) {
+ svc := NewOutputService8ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><List><member><Foo>firstfoo</Foo><Bar>firstbar</Bar><Baz>firstbaz</Baz></member><member><Foo>secondfoo</Foo><Bar>secondbar</Bar><Baz>secondbaz</Baz></member></List></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService8TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "firstbar", *out.List[0].Bar)
+ assert.Equal(t, "firstbaz", *out.List[0].Baz)
+ assert.Equal(t, "firstfoo", *out.List[0].Foo)
+ assert.Equal(t, "secondbar", *out.List[1].Bar)
+ assert.Equal(t, "secondbaz", *out.List[1].Baz)
+ assert.Equal(t, "secondfoo", *out.List[1].Foo)
+
+}
+
+func TestOutputService9ProtocolTestFlattenedListOfStructuresCase1(t *testing.T) {
+ svc := NewOutputService9ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><List><Foo>firstfoo</Foo><Bar>firstbar</Bar><Baz>firstbaz</Baz></List><List><Foo>secondfoo</Foo><Bar>secondbar</Bar><Baz>secondbaz</Baz></List></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService9TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "firstbar", *out.List[0].Bar)
+ assert.Equal(t, "firstbaz", *out.List[0].Baz)
+ assert.Equal(t, "firstfoo", *out.List[0].Foo)
+ assert.Equal(t, "secondbar", *out.List[1].Bar)
+ assert.Equal(t, "secondbaz", *out.List[1].Baz)
+ assert.Equal(t, "secondfoo", *out.List[1].Foo)
+
+}
+
+func TestOutputService10ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) {
+ svc := NewOutputService10ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><NamedList>a</NamedList><NamedList>b</NamedList></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService10TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "a", *out.List[0])
+ assert.Equal(t, "b", *out.List[1])
+
+}
+
+func TestOutputService11ProtocolTestNormalMapCase1(t *testing.T) {
+ svc := NewOutputService11ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><Map><entry><key>qux</key><value><foo>bar</foo></value></entry><entry><key>baz</key><value><foo>bam</foo></value></entry></Map></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService11TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"].Foo)
+ assert.Equal(t, "bar", *out.Map["qux"].Foo)
+
+}
+
+func TestOutputService12ProtocolTestFlattenedMapCase1(t *testing.T) {
+ svc := NewOutputService12ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><Map><key>qux</key><value>bar</value></Map><Map><key>baz</key><value>bam</value></Map></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService12TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"])
+ assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService13ProtocolTestFlattenedMapInShapeDefinitionCase1(t *testing.T) {
+ svc := NewOutputService13ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><Attribute><Name>qux</Name><Value>bar</Value></Attribute></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService13TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService14ProtocolTestNamedMapCase1(t *testing.T) {
+ svc := NewOutputService14ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><Map><foo>qux</foo><bar>bar</bar></Map><Map><foo>baz</foo><bar>bam</bar></Map></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService14TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"])
+ assert.Equal(t, "bar", *out.Map["qux"])
+
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go
new file mode 100644
index 0000000..cd5eef2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go
@@ -0,0 +1,212 @@
+// Package rest provides RESTful serialisation of AWS requests and responses.
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/url"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// RFC822 returns an RFC822 formatted timestamp for AWS protocols.
+// NOTE(review): day-of-month is "2" (unpadded) rather than the RFC 822/1123
+// reference "02" — presumably matching what AWS emits; confirm upstream
+// before changing.
+const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+// init precomputes the escape table: only the RFC 3986 "unreserved"
+// characters (ALPHA / DIGIT / "-" / "." / "_" / "~") pass through verbatim;
+// EscapePath percent-encodes everything else.
+func init() {
+	for i := 0; i < len(noEscape); i++ {
+		// AWS expects every character except these to be escaped
+		noEscape[i] = (i >= 'A' && i <= 'Z') ||
+			(i >= 'a' && i <= 'z') ||
+			(i >= '0' && i <= '9') ||
+			i == '-' ||
+			i == '.' ||
+			i == '_' ||
+			i == '~'
+	}
+}
+
+// Build builds the REST component of a service request: it serialises the
+// fields of r.Params tagged with location:"header"/"headers"/"uri"/
+// "querystring" into the HTTP request, then installs the request body from
+// the shape's payload field (if any). No-op when no params were supplied.
+func Build(r *aws.Request) {
+	if r.ParamsFilled() {
+		v := reflect.ValueOf(r.Params).Elem()
+		buildLocationElements(r, v)
+		buildBody(r, v)
+	}
+}
+
+// buildLocationElements walks the exported fields of the params struct and
+// routes each one to the HTTP request location named by its `location` tag.
+// The query string is accumulated in a url.Values and re-encoded once at the
+// end; serialization stops at the first field that records an r.Error.
+func buildLocationElements(r *aws.Request, v reflect.Value) {
+	query := r.HTTPRequest.URL.Query()
+
+	for i := 0; i < v.NumField(); i++ {
+		m := v.Field(i)
+		// Skip unexported fields (first rune is lowercase).
+		if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+			continue
+		}
+
+		if m.IsValid() {
+			field := v.Type().Field(i)
+			// Wire name defaults to the Go field name when no tag is set.
+			name := field.Tag.Get("locationName")
+			if name == "" {
+				name = field.Name
+			}
+			if m.Kind() == reflect.Ptr {
+				m = m.Elem()
+			}
+			// Nil pointer fields have nothing to serialise.
+			if !m.IsValid() {
+				continue
+			}
+
+			switch field.Tag.Get("location") {
+			case "headers": // header maps
+				// Header maps use the raw locationName as a key prefix.
+				buildHeaderMap(r, m, field.Tag.Get("locationName"))
+			case "header":
+				buildHeader(r, m, name)
+			case "uri":
+				buildURI(r, m, name)
+			case "querystring":
+				buildQueryString(r, m, name, query)
+			}
+		}
+		// Abort on the first serialization error.
+		if r.Error != nil {
+			return
+		}
+	}
+
+	r.HTTPRequest.URL.RawQuery = query.Encode()
+	updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path)
+}
+
+// buildBody sets the HTTP request body from the shape's payload field, named
+// by the `payload` tag on SDKShapeTraits. Only non-structure payloads are
+// handled here (structure payloads are marshalled by the XML encoder).
+// Supported payload kinds: io.ReadSeeker (streamed), []byte, and string.
+func buildBody(r *aws.Request, v reflect.Value) {
+	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
+		if payloadName := field.Tag.Get("payload"); payloadName != "" {
+			pfield, _ := v.Type().FieldByName(payloadName)
+			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+				payload := reflect.Indirect(v.FieldByName(payloadName))
+				if payload.IsValid() && payload.Interface() != nil {
+					switch reader := payload.Interface().(type) {
+					case io.ReadSeeker:
+						r.SetReaderBody(reader)
+					case []byte:
+						r.SetBufferBody(reader)
+					case string:
+						r.SetStringBody(reader)
+					default:
+						// Any other payload type is a code-generation bug.
+						r.Error = awserr.New("SerializationError",
+							"failed to encode REST request",
+							fmt.Errorf("unknown payload type %s", payload.Type()))
+					}
+				}
+			}
+		}
+	}
+}
+
+// buildHeader stringifies v via convertType and adds it as the header `name`.
+// A nil converted value (nil field) adds nothing; a conversion failure is
+// recorded on r.Error.
+func buildHeader(r *aws.Request, v reflect.Value, name string) {
+	str, err := convertType(v)
+	if err != nil {
+		r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
+	} else if str != nil {
+		r.HTTPRequest.Header.Add(name, *str)
+	}
+}
+
+// buildHeaderMap serialises a map-valued field as one header per entry,
+// prefixing each map key with `prefix` (e.g. "x-amz-meta-"). Values are
+// stringified with convertType; failures are recorded on r.Error.
+func buildHeaderMap(r *aws.Request, v reflect.Value, prefix string) {
+	for _, key := range v.MapKeys() {
+		str, err := convertType(v.MapIndex(key))
+		if err != nil {
+			r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
+		} else if str != nil {
+			r.HTTPRequest.Header.Add(prefix+key.String(), *str)
+		}
+	}
+}
+
+// buildURI substitutes the value of v for the "{name}" (fully escaped) and
+// "{name+}" (slashes preserved, greedy) placeholders in the request path.
+func buildURI(r *aws.Request, v reflect.Value, name string) {
+	value, err := convertType(v)
+	if err != nil {
+		r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
+	} else if value != nil {
+		uri := r.HTTPRequest.URL.Path
+		uri = strings.Replace(uri, "{"+name+"}", EscapePath(*value, true), -1)
+		uri = strings.Replace(uri, "{"+name+"+}", EscapePath(*value, false), -1)
+		r.HTTPRequest.URL.Path = uri
+	}
+}
+
+// buildQueryString stringifies v and sets it as the query parameter `name`
+// in the accumulated url.Values (Set overwrites any existing value).
+func buildQueryString(r *aws.Request, v reflect.Value, name string, query url.Values) {
+	str, err := convertType(v)
+	if err != nil {
+		r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
+	} else if str != nil {
+		query.Set(name, *str)
+	}
+}
+
+// updatePath rebuilds the URL with a cleaned path stored in url.Opaque, so
+// that net/http transmits the path exactly as built here (no re-escaping).
+// NOTE(review): path.Clean collapses "//" and drops trailing slashes, which
+// can alter keys for services where those are significant (e.g. S3 object
+// keys) — this matches the pinned upstream revision, flagging only.
+func updatePath(url *url.URL, urlPath string) {
+	scheme, query := url.Scheme, url.RawQuery
+
+	// clean up path
+	urlPath = path.Clean(urlPath)
+
+	// get formatted URL minus scheme so we can build this into Opaque
+	url.Scheme, url.Path, url.RawQuery = "", "", ""
+	s := url.String()
+	url.Scheme = scheme
+	url.RawQuery = query
+
+	// build opaque URI
+	url.Opaque = s + urlPath
+}
+
+// EscapePath escapes part of a URL path in Amazon style: every byte not in
+// the unreserved set (see noEscape) becomes "%XX" with an uppercase,
+// zero-padded two-digit hex code. When encodeSep is false, "/" passes
+// through unescaped so path separators survive.
+func EscapePath(path string, encodeSep bool) string {
+	var buf bytes.Buffer
+	for i := 0; i < len(path); i++ {
+		c := path[i]
+		if noEscape[c] || (c == '/' && !encodeSep) {
+			buf.WriteByte(c)
+		} else {
+			// Zero-pad to two hex digits: FormatUint alone renders bytes
+			// below 0x10 as a single digit (e.g. '\n' -> "%A"), which is
+			// not valid percent-encoding and corrupts signed URLs.
+			fmt.Fprintf(&buf, "%%%02X", c)
+		}
+	}
+	return buf.String()
+}
+
+// convertType renders a scalar field value as its wire-format string:
+// strings pass through, []byte is base64-encoded, bools/ints/floats use
+// strconv, and time.Time is formatted as UTC RFC822. Returns (nil, nil) for
+// nil/invalid values so callers can skip unset fields, and an error for any
+// unsupported type.
+func convertType(v reflect.Value) (*string, error) {
+	v = reflect.Indirect(v)
+	if !v.IsValid() {
+		return nil, nil
+	}
+
+	var str string
+	switch value := v.Interface().(type) {
+	case string:
+		str = value
+	case []byte:
+		str = base64.StdEncoding.EncodeToString(value)
+	case bool:
+		str = strconv.FormatBool(value)
+	case int64:
+		str = strconv.FormatInt(value, 10)
+	case float64:
+		str = strconv.FormatFloat(value, 'f', -1, 64)
+	case time.Time:
+		str = value.UTC().Format(RFC822)
+	default:
+		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+		return nil, err
+	}
+	return &str, nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go
new file mode 100644
index 0000000..1f603bb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go
@@ -0,0 +1,45 @@
+package rest
+
+import "reflect"
+
+// PayloadMember returns the payload field member of i if there is one, or nil.
+// i must be a pointer to a struct carrying an SDKShapeTraits field whose
+// `payload` tag names the member; only members tagged type:"structure" are
+// returned (non-structure payloads are handled by the body builders instead).
+func PayloadMember(i interface{}) interface{} {
+	if i == nil {
+		return nil
+	}
+
+	v := reflect.ValueOf(i).Elem()
+	if !v.IsValid() {
+		return nil
+	}
+	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
+		if payloadName := field.Tag.Get("payload"); payloadName != "" {
+			field, _ := v.Type().FieldByName(payloadName)
+			if field.Tag.Get("type") != "structure" {
+				return nil
+			}
+
+			payload := v.FieldByName(payloadName)
+			// NOTE(review): IsValid() is true for any existing field, even a
+			// nil pointer, so the right-hand side of this "||" is dead code
+			// and a nil *Struct payload is returned as a non-nil interface
+			// wrapping a typed nil. Matches the pinned upstream revision;
+			// flagging rather than altering vendored code.
+			if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
+				return payload.Interface()
+			}
+		}
+	}
+	return nil
+}
+
+// PayloadType returns the `type` tag of the payload field member of i (e.g.
+// "structure" or "blob") if there is one, or "" when i has no payload field.
+func PayloadType(i interface{}) string {
+	v := reflect.Indirect(reflect.ValueOf(i))
+	if !v.IsValid() {
+		return ""
+	}
+	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
+		if payloadName := field.Tag.Get("payload"); payloadName != "" {
+			if member, ok := v.Type().FieldByName(payloadName); ok {
+				return member.Tag.Get("type")
+			}
+		}
+	}
+	return ""
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go
new file mode 100644
index 0000000..a4155f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go
@@ -0,0 +1,174 @@
+package rest
+
+import (
+ "encoding/base64"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Unmarshal unmarshals the REST component of a response in a REST service:
+// first the body into the shape's payload field, then any fields bound to
+// status code or headers. No-op when the request has no output data shape.
+func Unmarshal(r *aws.Request) {
+	if r.DataFilled() {
+		v := reflect.Indirect(reflect.ValueOf(r.Data))
+		unmarshalBody(r, v)
+		unmarshalLocationElements(r, v)
+	}
+}
+
+// unmarshalBody decodes the HTTP response body into the shape's payload
+// field (named by the `payload` tag on SDKShapeTraits) for non-structure
+// payload types; structure payloads are left to the XML decoder. []byte and
+// *string payloads read the whole body into memory, while reader-typed
+// payloads receive the body as a stream the caller must close.
+func unmarshalBody(r *aws.Request, v reflect.Value) {
+	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
+		if payloadName := field.Tag.Get("payload"); payloadName != "" {
+			pfield, _ := v.Type().FieldByName(payloadName)
+			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+				payload := v.FieldByName(payloadName)
+				if payload.IsValid() {
+					switch payload.Interface().(type) {
+					case []byte:
+						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+						if err != nil {
+							r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+						} else {
+							payload.Set(reflect.ValueOf(b))
+						}
+					case *string:
+						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+						if err != nil {
+							r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+						} else {
+							str := string(b)
+							payload.Set(reflect.ValueOf(&str))
+						}
+					default:
+						// Reader-typed payloads are matched by type name.
+						switch payload.Type().String() {
+						case "io.ReadSeeker":
+							payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body)))
+						case "aws.ReadSeekCloser", "io.ReadCloser":
+							payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+						default:
+							r.Error = awserr.New("SerializationError",
+								"failed to decode REST response",
+								fmt.Errorf("unknown payload type %s", payload.Type()))
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+// unmarshalLocationElements fills exported output-shape fields bound (via
+// their `location` tag) to the response status code, a single header, or a
+// prefixed header map. Stops at the first field that records an r.Error.
+func unmarshalLocationElements(r *aws.Request, v reflect.Value) {
+	for i := 0; i < v.NumField(); i++ {
+		m, field := v.Field(i), v.Type().Field(i)
+		// Skip unexported fields (first rune is lowercase).
+		if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+			continue
+		}
+
+		if m.IsValid() {
+			// Wire name defaults to the Go field name when no tag is set.
+			name := field.Tag.Get("locationName")
+			if name == "" {
+				name = field.Name
+			}
+
+			switch field.Tag.Get("location") {
+			case "statusCode":
+				unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
+			case "header":
+				err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name))
+				if err != nil {
+					r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+					break // exits the switch; loop stops via the check below
+				}
+			case "headers":
+				prefix := field.Tag.Get("locationName")
+				err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
+				if err != nil {
+					r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+					break // exits the switch; loop stops via the check below
+				}
+			}
+		}
+		if r.Error != nil {
+			return
+		}
+	}
+}
+
+// unmarshalStatusCode stores the HTTP status code into v when v is an *int64
+// field; any other field type is silently ignored.
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+	if !v.IsValid() {
+		return
+	}
+
+	switch v.Interface().(type) {
+	case *int64:
+		s := int64(statusCode)
+		v.Set(reflect.ValueOf(&s))
+	}
+}
+
+// unmarshalHeaderMap collects all response headers whose name begins with
+// `prefix` (case-insensitively) into a map[string]*string field, keyed by
+// the canonicalised header name with the prefix stripped. Only the first
+// value of each header is kept. Non-map fields are ignored.
+// NOTE(review): keys are sliced by len(prefix) after canonicalisation —
+// assumes the canonical form keeps the same length, which holds for ASCII
+// header names. &v[0] presumes net/http never yields an empty value slice.
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+	switch r.Interface().(type) {
+	case map[string]*string: // we only support string map value types
+		out := map[string]*string{}
+		for k, v := range headers {
+			k = http.CanonicalHeaderKey(k)
+			if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+				out[k[len(prefix):]] = &v[0]
+			}
+		}
+		r.Set(reflect.ValueOf(out))
+	}
+	return nil
+}
+
+// unmarshalHeader parses a single header string into the typed output field
+// v: *string verbatim, []byte base64-decoded, *bool/*int64/*float64 via
+// strconv, and *time.Time via the shared RFC822 layout. Empty headers are
+// skipped for non-string fields; unsupported field types return an error.
+// NOTE(review): the empty-header guard calls v.Elem() unconditionally,
+// which would panic for a non-pointer []byte field with an empty header —
+// left as-is to stay close to upstream; flagging only.
+func unmarshalHeader(v reflect.Value, header string) error {
+	if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+		return nil
+	}
+
+	switch v.Interface().(type) {
+	case *string:
+		v.Set(reflect.ValueOf(&header))
+	case []byte:
+		b, err := base64.StdEncoding.DecodeString(header)
+		if err != nil {
+			return err
+		}
+		// Set the slice directly: the field's type is []byte, so assigning
+		// a *[]byte (the previous &b) makes reflect.Value.Set panic with a
+		// type mismatch.
+		v.Set(reflect.ValueOf(b))
+	case *bool:
+		b, err := strconv.ParseBool(header)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&b))
+	case *int64:
+		i, err := strconv.ParseInt(header, 10, 64)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&i))
+	case *float64:
+		f, err := strconv.ParseFloat(header, 64)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&f))
+	case *time.Time:
+		t, err := time.Parse(RFC822, header)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&t))
+	default:
+		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+		return err
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/build_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/build_test.go
new file mode 100644
index 0000000..6fcd3f8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/build_test.go
@@ -0,0 +1,2736 @@
+package restxml_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/internal/protocol/restxml"
+ "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/internal/signer/v4"
+ "github.com/aws/aws-sdk-go/internal/util"
+ "github.com/stretchr/testify/assert"
+)
+
+// Blank-identifier references keep every import of this generated file in
+// use regardless of which test cases the generator emitted below.
+var _ bytes.Buffer // always import bytes
+var _ http.Request
+var _ json.Marshaler
+var _ time.Time
+var _ xmlutil.XMLNode
+var _ xml.Attr
+var _ = ioutil.Discard
+var _ = util.Trim("")
+var _ = url.Values{}
+var _ = io.EOF
+
+// InputService1ProtocolTest is a client for InputService1ProtocolTest.
+// Generated REST-XML fixture: a flat structure serialised under a custom
+// locationName ("OperationRequest") with an xmlURI namespace.
+type InputService1ProtocolTest struct {
+	*aws.Service
+}
+
+// New returns a new InputService1ProtocolTest client.
+func NewInputService1ProtocolTest(config *aws.Config) *InputService1ProtocolTest {
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice1protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService1ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService1ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+	req := aws.NewRequest(c.Service, op, params, data)
+
+	return req
+}
+
+const opInputService1TestCaseOperation1 = "OperationName"
+
+// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation.
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *aws.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) {
+	op := &aws.Operation{
+		Name:       opInputService1TestCaseOperation1,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2014-01-01/hostedzone",
+	}
+
+	if input == nil {
+		input = &InputService1TestShapeInputShape{}
+	}
+
+	// NOTE(review): output is still nil when handed to newRequest; req.Data
+	// is assigned just below, so the data argument is effectively unused.
+	// This matches the code generator's template.
+	req = c.newRequest(op, input, output)
+	output = &InputService1TestShapeInputService1TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) {
+	req, out := c.InputService1TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+const opInputService1TestCaseOperation2 = "OperationName"
+
+// InputService1TestCaseOperation2Request generates a request for the InputService1TestCaseOperation2 operation.
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation2Request(input *InputService1TestShapeInputShape) (req *aws.Request, output *InputService1TestShapeInputService1TestCaseOperation2Output) {
+	op := &aws.Operation{
+		Name:       opInputService1TestCaseOperation2,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/2014-01-01/hostedzone",
+	}
+
+	if input == nil {
+		input = &InputService1TestShapeInputShape{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService1TestShapeInputService1TestCaseOperation2Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation2(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation2Output, error) {
+	req, out := c.InputService1TestCaseOperation2Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService1TestShapeInputService1TestCaseOperation1Output struct {
+	metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService1TestShapeInputService1TestCaseOperation2Output struct {
+	metadataInputService1TestShapeInputService1TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputService1TestCaseOperation2Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService1TestShapeInputShape struct {
+	Description *string `type:"string"`
+
+	Name *string `type:"string"`
+
+	metadataInputService1TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputShape struct {
+	SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+// InputService2ProtocolTest is a client for InputService2ProtocolTest.
+// Generated REST-XML fixture: scalar boolean/integer/float members.
+type InputService2ProtocolTest struct {
+	*aws.Service
+}
+
+// New returns a new InputService2ProtocolTest client.
+func NewInputService2ProtocolTest(config *aws.Config) *InputService2ProtocolTest {
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice2protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService2ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService2ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+	req := aws.NewRequest(c.Service, op, params, data)
+
+	return req
+}
+
+const opInputService2TestCaseOperation1 = "OperationName"
+
+// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation.
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputShape) (req *aws.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) {
+	op := &aws.Operation{
+		Name:       opInputService2TestCaseOperation1,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2014-01-01/hostedzone",
+	}
+
+	if input == nil {
+		input = &InputService2TestShapeInputShape{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService2TestShapeInputService2TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputShape) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) {
+	req, out := c.InputService2TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService2TestShapeInputService2TestCaseOperation1Output struct {
+	metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService2TestShapeInputShape struct {
+	First *bool `type:"boolean"`
+
+	Fourth *int64 `type:"integer"`
+
+	Second *bool `type:"boolean"`
+
+	Third *float64 `type:"float"`
+
+	metadataInputService2TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService2TestShapeInputShape struct {
+	SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+// InputService3ProtocolTest is a client for InputService3ProtocolTest.
+// Generated REST-XML fixture: a nested substructure member.
+type InputService3ProtocolTest struct {
+	*aws.Service
+}
+
+// New returns a new InputService3ProtocolTest client.
+func NewInputService3ProtocolTest(config *aws.Config) *InputService3ProtocolTest {
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice3protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService3ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService3ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+	req := aws.NewRequest(c.Service, op, params, data)
+
+	return req
+}
+
+const opInputService3TestCaseOperation1 = "OperationName"
+
+// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation.
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *aws.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) {
+	op := &aws.Operation{
+		Name:       opInputService3TestCaseOperation1,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2014-01-01/hostedzone",
+	}
+
+	if input == nil {
+		input = &InputService3TestShapeInputShape{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService3TestShapeInputService3TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) {
+	req, out := c.InputService3TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService3TestShapeInputService3TestCaseOperation1Output struct {
+	metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService3TestShapeInputShape struct {
+	Description *string `type:"string"`
+
+	SubStructure *InputService3TestShapeSubStructure `type:"structure"`
+
+	metadataInputService3TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeInputShape struct {
+	SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService3TestShapeSubStructure struct {
+	Bar *string `type:"string"`
+
+	Foo *string `type:"string"`
+
+	metadataInputService3TestShapeSubStructure `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeSubStructure struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService4ProtocolTest is a client for InputService4ProtocolTest.
+// Generated REST-XML fixture: same shape layout as service 3 — presumably
+// the paired test exercises a different input (e.g. an empty substructure).
+type InputService4ProtocolTest struct {
+	*aws.Service
+}
+
+// New returns a new InputService4ProtocolTest client.
+func NewInputService4ProtocolTest(config *aws.Config) *InputService4ProtocolTest {
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice4protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService4ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService4ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+	req := aws.NewRequest(c.Service, op, params, data)
+
+	return req
+}
+
+const opInputService4TestCaseOperation1 = "OperationName"
+
+// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation.
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *aws.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) {
+	op := &aws.Operation{
+		Name:       opInputService4TestCaseOperation1,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2014-01-01/hostedzone",
+	}
+
+	if input == nil {
+		input = &InputService4TestShapeInputShape{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService4TestShapeInputService4TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) {
+	req, out := c.InputService4TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService4TestShapeInputService4TestCaseOperation1Output struct {
+	metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService4TestShapeInputShape struct {
+	Description *string `type:"string"`
+
+	SubStructure *InputService4TestShapeSubStructure `type:"structure"`
+
+	metadataInputService4TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeInputShape struct {
+	SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService4TestShapeSubStructure struct {
+	Bar *string `type:"string"`
+
+	Foo *string `type:"string"`
+
+	metadataInputService4TestShapeSubStructure `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeSubStructure struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService5ProtocolTest is a client for InputService5ProtocolTest.
+// Generated REST-XML fixture: a non-flattened list of strings.
+type InputService5ProtocolTest struct {
+	*aws.Service
+}
+
+// New returns a new InputService5ProtocolTest client.
+func NewInputService5ProtocolTest(config *aws.Config) *InputService5ProtocolTest {
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice5protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService5ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService5ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+	req := aws.NewRequest(c.Service, op, params, data)
+
+	return req
+}
+
+const opInputService5TestCaseOperation1 = "OperationName"
+
+// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation.
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputShape) (req *aws.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) {
+	op := &aws.Operation{
+		Name:       opInputService5TestCaseOperation1,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2014-01-01/hostedzone",
+	}
+
+	if input == nil {
+		input = &InputService5TestShapeInputShape{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService5TestShapeInputService5TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) {
+	req, out := c.InputService5TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService5TestShapeInputService5TestCaseOperation1Output struct {
+	metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService5TestShapeInputShape struct {
+	ListParam []*string `type:"list"`
+
+	metadataInputService5TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService5TestShapeInputShape struct {
+	SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+// InputService6ProtocolTest is a client for InputService6ProtocolTest.
+// Generated REST-XML fixture: a list with custom element and member names
+// (locationName / locationNameList).
+type InputService6ProtocolTest struct {
+	*aws.Service
+}
+
+// New returns a new InputService6ProtocolTest client.
+func NewInputService6ProtocolTest(config *aws.Config) *InputService6ProtocolTest {
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice6protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService6ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService6ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+	req := aws.NewRequest(c.Service, op, params, data)
+
+	return req
+}
+
+const opInputService6TestCaseOperation1 = "OperationName"
+
+// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation.
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputShape) (req *aws.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) {
+	op := &aws.Operation{
+		Name:       opInputService6TestCaseOperation1,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2014-01-01/hostedzone",
+	}
+
+	if input == nil {
+		input = &InputService6TestShapeInputShape{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService6TestShapeInputService6TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputShape) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) {
+	req, out := c.InputService6TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService6TestShapeInputService6TestCaseOperation1Output struct {
+	metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService6TestShapeInputShape struct {
+	ListParam []*string `locationName:"AlternateName" locationNameList:"NotMember" type:"list"`
+
+	metadataInputService6TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService6TestShapeInputShape struct {
+	SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+// InputService7ProtocolTest is a client for InputService7ProtocolTest.
+// Generated REST-XML fixture: a flattened list of strings.
+type InputService7ProtocolTest struct {
+	*aws.Service
+}
+
+// New returns a new InputService7ProtocolTest client.
+func NewInputService7ProtocolTest(config *aws.Config) *InputService7ProtocolTest {
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice7protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService7ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService7ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+	req := aws.NewRequest(c.Service, op, params, data)
+
+	return req
+}
+
+const opInputService7TestCaseOperation1 = "OperationName"
+
+// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation.
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputShape) (req *aws.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) {
+	op := &aws.Operation{
+		Name:       opInputService7TestCaseOperation1,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2014-01-01/hostedzone",
+	}
+
+	if input == nil {
+		input = &InputService7TestShapeInputShape{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InputService7TestShapeInputService7TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputShape) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) {
+	req, out := c.InputService7TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type InputService7TestShapeInputService7TestCaseOperation1Output struct {
+	metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService7TestShapeInputShape struct {
+	ListParam []*string `type:"list" flattened:"true"`
+
+	metadataInputService7TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService7TestShapeInputShape struct {
+	SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+// InputService8ProtocolTest is a client for InputService8ProtocolTest.
+type InputService8ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService8ProtocolTest client.
+func NewInputService8ProtocolTest(config *aws.Config) *InputService8ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice8protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &InputService8ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService8ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService8TestCaseOperation1 = "OperationName"
+
+// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation.
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService8TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService8TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService8TestShapeInputService8TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputShape) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) {
+ req, out := c.InputService8TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService8TestShapeInputService8TestCaseOperation1Output struct {
+ metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService8TestShapeInputShape struct {
+ ListParam []*string `locationName:"item" type:"list" flattened:"true"`
+
+ metadataInputService8TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService8TestShapeInputShape struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+// InputService9ProtocolTest is a client for InputService9ProtocolTest.
+type InputService9ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService9ProtocolTest client.
+func NewInputService9ProtocolTest(config *aws.Config) *InputService9ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice9protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &InputService9ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService9ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService9TestCaseOperation1 = "OperationName"
+
+// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService9TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService9TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService9TestShapeInputService9TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) {
+ req, out := c.InputService9TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService9TestShapeInputService9TestCaseOperation1Output struct {
+ metadataInputService9TestShapeInputService9TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService9TestShapeInputService9TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService9TestShapeInputShape struct {
+ ListParam []*InputService9TestShapeSingleFieldStruct `locationName:"item" type:"list" flattened:"true"`
+
+ metadataInputService9TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService9TestShapeInputShape struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService9TestShapeSingleFieldStruct struct {
+ Element *string `locationName:"value" type:"string"`
+
+ metadataInputService9TestShapeSingleFieldStruct `json:"-" xml:"-"`
+}
+
+type metadataInputService9TestShapeSingleFieldStruct struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService10ProtocolTest is a client for InputService10ProtocolTest.
+type InputService10ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService10ProtocolTest client.
+func NewInputService10ProtocolTest(config *aws.Config) *InputService10ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice10protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &InputService10ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService10ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService10TestCaseOperation1 = "OperationName"
+
+// InputService10TestCaseOperation1Request generates a request for the InputService10TestCaseOperation1 operation.
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputShape) (req *aws.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService10TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService10TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService10TestShapeInputService10TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputShape) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) {
+ req, out := c.InputService10TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService10TestShapeInputService10TestCaseOperation1Output struct {
+ metadataInputService10TestShapeInputService10TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService10TestShapeInputService10TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService10TestShapeInputShape struct {
+ StructureParam *InputService10TestShapeStructureShape `type:"structure"`
+
+ metadataInputService10TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService10TestShapeInputShape struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService10TestShapeStructureShape struct {
+ B []byte `locationName:"b" type:"blob"`
+
+ T *time.Time `locationName:"t" type:"timestamp" timestampFormat:"iso8601"`
+
+ metadataInputService10TestShapeStructureShape `json:"-" xml:"-"`
+}
+
+type metadataInputService10TestShapeStructureShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService11ProtocolTest is a client for InputService11ProtocolTest.
+type InputService11ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService11ProtocolTest client.
+func NewInputService11ProtocolTest(config *aws.Config) *InputService11ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice11protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &InputService11ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService11ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService11TestCaseOperation1 = "OperationName"
+
+// InputService11TestCaseOperation1Request generates a request for the InputService11TestCaseOperation1 operation.
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputShape) (req *aws.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService11TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService11TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService11TestShapeInputService11TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputShape) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) {
+ req, out := c.InputService11TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService11TestShapeInputService11TestCaseOperation1Output struct {
+ metadataInputService11TestShapeInputService11TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService11TestShapeInputService11TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService11TestShapeInputShape struct {
+ Foo map[string]*string `location:"headers" locationName:"x-foo-" type:"map"`
+
+ metadataInputService11TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService11TestShapeInputShape struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+// InputService12ProtocolTest is a client for InputService12ProtocolTest.
+type InputService12ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService12ProtocolTest client.
+func NewInputService12ProtocolTest(config *aws.Config) *InputService12ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice12protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &InputService12ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService12ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService12ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService12TestCaseOperation1 = "OperationName"
+
+// InputService12TestCaseOperation1Request generates a request for the InputService12TestCaseOperation1 operation.
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputShape) (req *aws.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService12TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService12TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService12TestShapeInputService12TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) {
+ req, out := c.InputService12TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService12TestShapeInputService12TestCaseOperation1Output struct {
+ metadataInputService12TestShapeInputService12TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputService12TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService12TestShapeInputShape struct {
+ Foo *string `locationName:"foo" type:"string"`
+
+ metadataInputService12TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure" payload:"Foo"`
+}
+
+// InputService13ProtocolTest is a client for InputService13ProtocolTest.
+type InputService13ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService13ProtocolTest client.
+func NewInputService13ProtocolTest(config *aws.Config) *InputService13ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice13protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &InputService13ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService13ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService13ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService13TestCaseOperation1 = "OperationName"
+
+// InputService13TestCaseOperation1Request generates a request for the InputService13TestCaseOperation1 operation.
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputShape) (req *aws.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService13TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService13TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService13TestShapeInputService13TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) {
+ req, out := c.InputService13TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService13TestCaseOperation2 = "OperationName"
+
+// InputService13TestCaseOperation2Request generates a request for the InputService13TestCaseOperation2 operation.
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation2Request(input *InputService13TestShapeInputShape) (req *aws.Request, output *InputService13TestShapeInputService13TestCaseOperation2Output) {
+ op := &aws.Operation{
+ Name: opInputService13TestCaseOperation2,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService13TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService13TestShapeInputService13TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation2(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) {
+ req, out := c.InputService13TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService13TestShapeInputService13TestCaseOperation1Output struct {
+ metadataInputService13TestShapeInputService13TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService13TestShapeInputService13TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService13TestShapeInputService13TestCaseOperation2Output struct {
+ metadataInputService13TestShapeInputService13TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService13TestShapeInputService13TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService13TestShapeInputShape struct {
+ Foo []byte `locationName:"foo" type:"blob"`
+
+ metadataInputService13TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService13TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure" payload:"Foo"`
+}
+
+// InputService14ProtocolTest is a client for InputService14ProtocolTest.
+type InputService14ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService14ProtocolTest client.
+func NewInputService14ProtocolTest(config *aws.Config) *InputService14ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice14protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &InputService14ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService14ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService14ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService14TestCaseOperation1 = "OperationName"
+
+// InputService14TestCaseOperation1Request generates a request for the InputService14TestCaseOperation1 operation.
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation1Request(input *InputService14TestShapeInputShape) (req *aws.Request, output *InputService14TestShapeInputService14TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService14TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService14TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService14TestShapeInputService14TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation1(input *InputService14TestShapeInputShape) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) {
+ req, out := c.InputService14TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService14TestCaseOperation2 = "OperationName"
+
+// InputService14TestCaseOperation2Request generates a request for the InputService14TestCaseOperation2 operation.
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation2Request(input *InputService14TestShapeInputShape) (req *aws.Request, output *InputService14TestShapeInputService14TestCaseOperation2Output) {
+ op := &aws.Operation{
+ Name: opInputService14TestCaseOperation2,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService14TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService14TestShapeInputService14TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation2(input *InputService14TestShapeInputShape) (*InputService14TestShapeInputService14TestCaseOperation2Output, error) {
+ req, out := c.InputService14TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService14TestCaseOperation3 = "OperationName"
+
+// InputService14TestCaseOperation3Request generates a request for the InputService14TestCaseOperation3 operation.
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation3Request(input *InputService14TestShapeInputShape) (req *aws.Request, output *InputService14TestShapeInputService14TestCaseOperation3Output) {
+ op := &aws.Operation{
+ Name: opInputService14TestCaseOperation3,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService14TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService14TestShapeInputService14TestCaseOperation3Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation3(input *InputService14TestShapeInputShape) (*InputService14TestShapeInputService14TestCaseOperation3Output, error) {
+ req, out := c.InputService14TestCaseOperation3Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService14TestShapeFooShape struct {
+ Baz *string `locationName:"baz" type:"string"`
+
+ metadataInputService14TestShapeFooShape `json:"-" xml:"-"`
+}
+
+type metadataInputService14TestShapeFooShape struct {
+ SDKShapeTraits bool `locationName:"foo" type:"structure"`
+}
+
+type InputService14TestShapeInputService14TestCaseOperation1Output struct {
+ metadataInputService14TestShapeInputService14TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService14TestShapeInputService14TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService14TestShapeInputService14TestCaseOperation2Output struct {
+ metadataInputService14TestShapeInputService14TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService14TestShapeInputService14TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService14TestShapeInputService14TestCaseOperation3Output struct {
+ metadataInputService14TestShapeInputService14TestCaseOperation3Output `json:"-" xml:"-"`
+}
+
+type metadataInputService14TestShapeInputService14TestCaseOperation3Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService14TestShapeInputShape struct {
+ Foo *InputService14TestShapeFooShape `locationName:"foo" type:"structure"`
+
+ metadataInputService14TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService14TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure" payload:"Foo"`
+}
+
+// InputService15ProtocolTest is a client for InputService15ProtocolTest.
+type InputService15ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService15ProtocolTest client.
+func NewInputService15ProtocolTest(config *aws.Config) *InputService15ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice15protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &InputService15ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService15ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService15ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService15TestCaseOperation1 = "OperationName"
+
+// InputService15TestCaseOperation1Request generates a request for the InputService15TestCaseOperation1 operation.
+func (c *InputService15ProtocolTest) InputService15TestCaseOperation1Request(input *InputService15TestShapeInputShape) (req *aws.Request, output *InputService15TestShapeInputService15TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService15TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService15TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService15TestShapeInputService15TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService15ProtocolTest) InputService15TestCaseOperation1(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation1Output, error) {
+ req, out := c.InputService15TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService15TestShapeGrant struct {
+ Grantee *InputService15TestShapeGrantee `type:"structure"`
+
+ metadataInputService15TestShapeGrant `json:"-" xml:"-"`
+}
+
+type metadataInputService15TestShapeGrant struct {
+ SDKShapeTraits bool `locationName:"Grant" type:"structure"`
+}
+
+type InputService15TestShapeGrantee struct {
+ EmailAddress *string `type:"string"`
+
+ Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true"`
+
+ metadataInputService15TestShapeGrantee `json:"-" xml:"-"`
+}
+
+type metadataInputService15TestShapeGrantee struct {
+ SDKShapeTraits bool `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
+}
+
+type InputService15TestShapeInputService15TestCaseOperation1Output struct {
+ metadataInputService15TestShapeInputService15TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService15TestShapeInputService15TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService15TestShapeInputShape struct {
+ Grant *InputService15TestShapeGrant `locationName:"Grant" type:"structure"`
+
+ metadataInputService15TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService15TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure" payload:"Grant"`
+}
+
+// InputService16ProtocolTest is a client for InputService16ProtocolTest.
+type InputService16ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService16ProtocolTest client.
+func NewInputService16ProtocolTest(config *aws.Config) *InputService16ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice16protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &InputService16ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService16ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService16ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService16TestCaseOperation1 = "OperationName"
+
+// InputService16TestCaseOperation1Request generates a request for the InputService16TestCaseOperation1 operation.
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation1Request(input *InputService16TestShapeInputShape) (req *aws.Request, output *InputService16TestShapeInputService16TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService16TestCaseOperation1,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &InputService16TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService16TestShapeInputService16TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation1(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation1Output, error) {
+ req, out := c.InputService16TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService16TestShapeInputService16TestCaseOperation1Output struct {
+ metadataInputService16TestShapeInputService16TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService16TestShapeInputService16TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService16TestShapeInputShape struct {
+ Bucket *string `location:"uri" type:"string"`
+
+ Key *string `location:"uri" type:"string"`
+
+ metadataInputService16TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService16TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService17ProtocolTest is a client for InputService17ProtocolTest.
+type InputService17ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService17ProtocolTest client.
+func NewInputService17ProtocolTest(config *aws.Config) *InputService17ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice17protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &InputService17ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService17ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService17ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService17TestCaseOperation1 = "OperationName"
+
+// InputService17TestCaseOperation1Request generates a request for the InputService17TestCaseOperation1 operation.
+func (c *InputService17ProtocolTest) InputService17TestCaseOperation1Request(input *InputService17TestShapeInputShape) (req *aws.Request, output *InputService17TestShapeInputService17TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService17TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService17TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService17TestShapeInputService17TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService17ProtocolTest) InputService17TestCaseOperation1(input *InputService17TestShapeInputShape) (*InputService17TestShapeInputService17TestCaseOperation1Output, error) {
+ req, out := c.InputService17TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService17TestCaseOperation2 = "OperationName"
+
+// InputService17TestCaseOperation2Request generates a request for the InputService17TestCaseOperation2 operation.
+func (c *InputService17ProtocolTest) InputService17TestCaseOperation2Request(input *InputService17TestShapeInputShape) (req *aws.Request, output *InputService17TestShapeInputService17TestCaseOperation2Output) {
+ op := &aws.Operation{
+ Name: opInputService17TestCaseOperation2,
+ HTTPMethod: "POST",
+ HTTPPath: "/path?abc=mno",
+ }
+
+ if input == nil {
+ input = &InputService17TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService17TestShapeInputService17TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService17ProtocolTest) InputService17TestCaseOperation2(input *InputService17TestShapeInputShape) (*InputService17TestShapeInputService17TestCaseOperation2Output, error) {
+ req, out := c.InputService17TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService17TestShapeInputService17TestCaseOperation1Output struct {
+ metadataInputService17TestShapeInputService17TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService17TestShapeInputService17TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService17TestShapeInputService17TestCaseOperation2Output struct {
+ metadataInputService17TestShapeInputService17TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService17TestShapeInputService17TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService17TestShapeInputShape struct {
+ Foo *string `location:"querystring" locationName:"param-name" type:"string"`
+
+ metadataInputService17TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService17TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService18ProtocolTest is a client for InputService18ProtocolTest.
+type InputService18ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService18ProtocolTest client.
+func NewInputService18ProtocolTest(config *aws.Config) *InputService18ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice18protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &InputService18ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService18ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService18ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService18TestCaseOperation1 = "OperationName"
+
+// InputService18TestCaseOperation1Request generates a request for the InputService18TestCaseOperation1 operation.
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation1Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService18TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService18TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService18TestShapeInputService18TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation1(input *InputService18TestShapeInputShape) (*InputService18TestShapeInputService18TestCaseOperation1Output, error) {
+ req, out := c.InputService18TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService18TestCaseOperation2 = "OperationName"
+
+// InputService18TestCaseOperation2Request generates a request for the InputService18TestCaseOperation2 operation.
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation2Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation2Output) {
+ op := &aws.Operation{
+ Name: opInputService18TestCaseOperation2,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService18TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService18TestShapeInputService18TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation2(input *InputService18TestShapeInputShape) (*InputService18TestShapeInputService18TestCaseOperation2Output, error) {
+ req, out := c.InputService18TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService18TestCaseOperation3 = "OperationName"
+
+// InputService18TestCaseOperation3Request generates a request for the InputService18TestCaseOperation3 operation.
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation3Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation3Output) {
+ op := &aws.Operation{
+ Name: opInputService18TestCaseOperation3,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService18TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService18TestShapeInputService18TestCaseOperation3Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation3(input *InputService18TestShapeInputShape) (*InputService18TestShapeInputService18TestCaseOperation3Output, error) {
+ req, out := c.InputService18TestCaseOperation3Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService18TestCaseOperation4 = "OperationName"
+
+// InputService18TestCaseOperation4Request generates a request for the InputService18TestCaseOperation4 operation.
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation4Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation4Output) {
+ op := &aws.Operation{
+ Name: opInputService18TestCaseOperation4,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService18TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService18TestShapeInputService18TestCaseOperation4Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation4(input *InputService18TestShapeInputShape) (*InputService18TestShapeInputService18TestCaseOperation4Output, error) {
+ req, out := c.InputService18TestCaseOperation4Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService18TestCaseOperation5 = "OperationName"
+
+// InputService18TestCaseOperation5Request generates a request for the InputService18TestCaseOperation5 operation.
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation5Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation5Output) {
+ op := &aws.Operation{
+ Name: opInputService18TestCaseOperation5,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService18TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService18TestShapeInputService18TestCaseOperation5Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation5(input *InputService18TestShapeInputShape) (*InputService18TestShapeInputService18TestCaseOperation5Output, error) {
+ req, out := c.InputService18TestCaseOperation5Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService18TestCaseOperation6 = "OperationName"
+
+// InputService18TestCaseOperation6Request generates a request for the InputService18TestCaseOperation6 operation.
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation6Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation6Output) {
+ op := &aws.Operation{
+ Name: opInputService18TestCaseOperation6,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService18TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService18TestShapeInputService18TestCaseOperation6Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation6(input *InputService18TestShapeInputShape) (*InputService18TestShapeInputService18TestCaseOperation6Output, error) {
+ req, out := c.InputService18TestCaseOperation6Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService18TestShapeInputService18TestCaseOperation1Output struct {
+ metadataInputService18TestShapeInputService18TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService18TestShapeInputService18TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService18TestShapeInputService18TestCaseOperation2Output struct {
+ metadataInputService18TestShapeInputService18TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService18TestShapeInputService18TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService18TestShapeInputService18TestCaseOperation3Output struct {
+ metadataInputService18TestShapeInputService18TestCaseOperation3Output `json:"-" xml:"-"`
+}
+
+type metadataInputService18TestShapeInputService18TestCaseOperation3Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService18TestShapeInputService18TestCaseOperation4Output struct {
+ metadataInputService18TestShapeInputService18TestCaseOperation4Output `json:"-" xml:"-"`
+}
+
+type metadataInputService18TestShapeInputService18TestCaseOperation4Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService18TestShapeInputService18TestCaseOperation5Output struct {
+ metadataInputService18TestShapeInputService18TestCaseOperation5Output `json:"-" xml:"-"`
+}
+
+type metadataInputService18TestShapeInputService18TestCaseOperation5Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService18TestShapeInputService18TestCaseOperation6Output struct {
+ metadataInputService18TestShapeInputService18TestCaseOperation6Output `json:"-" xml:"-"`
+}
+
+type metadataInputService18TestShapeInputService18TestCaseOperation6Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService18TestShapeInputShape struct {
+ RecursiveStruct *InputService18TestShapeRecursiveStructType `type:"structure"`
+
+ metadataInputService18TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService18TestShapeInputShape struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService18TestShapeRecursiveStructType struct {
+ NoRecurse *string `type:"string"`
+
+ RecursiveList []*InputService18TestShapeRecursiveStructType `type:"list"`
+
+ RecursiveMap map[string]*InputService18TestShapeRecursiveStructType `type:"map"`
+
+ RecursiveStruct *InputService18TestShapeRecursiveStructType `type:"structure"`
+
+ metadataInputService18TestShapeRecursiveStructType `json:"-" xml:"-"`
+}
+
+type metadataInputService18TestShapeRecursiveStructType struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService19ProtocolTest is a client for InputService19ProtocolTest.
+type InputService19ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new InputService19ProtocolTest client.
+func NewInputService19ProtocolTest(config *aws.Config) *InputService19ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "inputservice19protocoltest",
+ APIVersion: "2014-01-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &InputService19ProtocolTest{service}
+}
+
+// newRequest creates a new request for a InputService19ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService19ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opInputService19TestCaseOperation1 = "OperationName"
+
+// InputService19TestCaseOperation1Request generates a request for the InputService19TestCaseOperation1 operation.
+func (c *InputService19ProtocolTest) InputService19TestCaseOperation1Request(input *InputService19TestShapeInputShape) (req *aws.Request, output *InputService19TestShapeInputService19TestCaseOperation1Output) {
+ op := &aws.Operation{
+ Name: opInputService19TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService19TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService19TestShapeInputService19TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService19ProtocolTest) InputService19TestCaseOperation1(input *InputService19TestShapeInputShape) (*InputService19TestShapeInputService19TestCaseOperation1Output, error) {
+ req, out := c.InputService19TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService19TestShapeInputService19TestCaseOperation1Output struct {
+ metadataInputService19TestShapeInputService19TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService19TestShapeInputService19TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService19TestShapeInputShape struct {
+ TimeArgInHeader *time.Time `location:"header" locationName:"x-amz-timearg" type:"timestamp" timestampFormat:"rfc822"`
+
+ metadataInputService19TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService19TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//
+// Tests begin here
+//
+
+func TestInputService1ProtocolTestBasicXMLSerializationCase1(t *testing.T) {
+ svc := NewInputService1ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService1TestShapeInputShape{
+ Description: aws.String("bar"),
+ Name: aws.String("foo"),
+ }
+ req, _ := svc.InputService1TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><Description>bar</Description><Name>foo</Name></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService1ProtocolTestBasicXMLSerializationCase2(t *testing.T) {
+ svc := NewInputService1ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService1TestShapeInputShape{
+ Description: aws.String("bar"),
+ Name: aws.String("foo"),
+ }
+ req, _ := svc.InputService1TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><Description>bar</Description><Name>foo</Name></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService2ProtocolTestSerializeOtherScalarTypesCase1(t *testing.T) {
+ svc := NewInputService2ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService2TestShapeInputShape{
+ First: aws.Boolean(true),
+ Fourth: aws.Long(3),
+ Second: aws.Boolean(false),
+ Third: aws.Double(1.2),
+ }
+ req, _ := svc.InputService2TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><First>true</First><Fourth>3</Fourth><Second>false</Second><Third>1.2</Third></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService3ProtocolTestNestedStructuresCase1(t *testing.T) {
+ svc := NewInputService3ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService3TestShapeInputShape{
+ Description: aws.String("baz"),
+ SubStructure: &InputService3TestShapeSubStructure{
+ Bar: aws.String("b"),
+ Foo: aws.String("a"),
+ },
+ }
+ req, _ := svc.InputService3TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><Description>baz</Description><SubStructure><Bar>b</Bar><Foo>a</Foo></SubStructure></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService4ProtocolTestNestedStructuresCase1(t *testing.T) {
+ svc := NewInputService4ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService4TestShapeInputShape{
+ Description: aws.String("baz"),
+ SubStructure: &InputService4TestShapeSubStructure{},
+ }
+ req, _ := svc.InputService4TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><Description>baz</Description><SubStructure></SubStructure></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService5ProtocolTestNonFlattenedListsCase1(t *testing.T) {
+ svc := NewInputService5ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService5TestShapeInputShape{
+ ListParam: []*string{
+ aws.String("one"),
+ aws.String("two"),
+ aws.String("three"),
+ },
+ }
+ req, _ := svc.InputService5TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><ListParam><member>one</member><member>two</member><member>three</member></ListParam></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService6ProtocolTestNonFlattenedListsWithLocationNameCase1(t *testing.T) {
+ svc := NewInputService6ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService6TestShapeInputShape{
+ ListParam: []*string{
+ aws.String("one"),
+ aws.String("two"),
+ aws.String("three"),
+ },
+ }
+ req, _ := svc.InputService6TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><AlternateName><NotMember>one</NotMember><NotMember>two</NotMember><NotMember>three</NotMember></AlternateName></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService7ProtocolTestFlattenedListsCase1(t *testing.T) {
+ svc := NewInputService7ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService7TestShapeInputShape{
+ ListParam: []*string{
+ aws.String("one"),
+ aws.String("two"),
+ aws.String("three"),
+ },
+ }
+ req, _ := svc.InputService7TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><ListParam>one</ListParam><ListParam>two</ListParam><ListParam>three</ListParam></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService8ProtocolTestFlattenedListsWithLocationNameCase1(t *testing.T) {
+ svc := NewInputService8ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService8TestShapeInputShape{
+ ListParam: []*string{
+ aws.String("one"),
+ aws.String("two"),
+ aws.String("three"),
+ },
+ }
+ req, _ := svc.InputService8TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><item>one</item><item>two</item><item>three</item></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService9ProtocolTestListOfStructuresCase1(t *testing.T) {
+ svc := NewInputService9ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService9TestShapeInputShape{
+ ListParam: []*InputService9TestShapeSingleFieldStruct{
+ {
+ Element: aws.String("one"),
+ },
+ {
+ Element: aws.String("two"),
+ },
+ {
+ Element: aws.String("three"),
+ },
+ },
+ }
+ req, _ := svc.InputService9TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><item><value>one</value></item><item><value>two</value></item><item><value>three</value></item></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService10ProtocolTestBlobAndTimestampShapesCase1(t *testing.T) {
+ svc := NewInputService10ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService10TestShapeInputShape{
+ StructureParam: &InputService10TestShapeStructureShape{
+ B: []byte("foo"),
+ T: aws.Time(time.Unix(1422172800, 0)),
+ },
+ }
+ req, _ := svc.InputService10TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><StructureParam><b>Zm9v</b><t>2015-01-25T08:00:00Z</t></StructureParam></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService11ProtocolTestHeaderMapsCase1(t *testing.T) {
+ svc := NewInputService11ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService11TestShapeInputShape{
+ Foo: map[string]*string{
+ "a": aws.String("b"),
+ "c": aws.String("d"),
+ },
+ }
+ req, _ := svc.InputService11TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+ assert.Equal(t, "b", r.Header.Get("x-foo-a"))
+ assert.Equal(t, "d", r.Header.Get("x-foo-c"))
+
+}
+
+func TestInputService12ProtocolTestStringPayloadCase1(t *testing.T) {
+ svc := NewInputService12ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService12TestShapeInputShape{
+ Foo: aws.String("bar"),
+ }
+ req, _ := svc.InputService12TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`bar`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService13ProtocolTestBlobPayloadCase1(t *testing.T) {
+ svc := NewInputService13ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService13TestShapeInputShape{
+ Foo: []byte("bar"),
+ }
+ req, _ := svc.InputService13TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`bar`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService13ProtocolTestBlobPayloadCase2(t *testing.T) {
+ svc := NewInputService13ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService13TestShapeInputShape{}
+ req, _ := svc.InputService13TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService14ProtocolTestStructurePayloadCase1(t *testing.T) {
+ svc := NewInputService14ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService14TestShapeInputShape{
+ Foo: &InputService14TestShapeFooShape{
+ Baz: aws.String("bar"),
+ },
+ }
+ req, _ := svc.InputService14TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<foo><baz>bar</baz></foo>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService14ProtocolTestStructurePayloadCase2(t *testing.T) {
+ svc := NewInputService14ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService14TestShapeInputShape{
+ Foo: &InputService14TestShapeFooShape{},
+ }
+ req, _ := svc.InputService14TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(``), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService14ProtocolTestStructurePayloadCase3(t *testing.T) {
+ svc := NewInputService14ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService14TestShapeInputShape{}
+ req, _ := svc.InputService14TestCaseOperation3Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService15ProtocolTestXMLAttributeCase1(t *testing.T) {
+ svc := NewInputService15ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService15TestShapeInputShape{
+ Grant: &InputService15TestShapeGrant{
+ Grantee: &InputService15TestShapeGrantee{
+ EmailAddress: aws.String("foo@example.com"),
+ Type: aws.String("CanonicalUser"),
+ },
+ },
+ }
+ req, _ := svc.InputService15TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<Grant xmlns:_xmlns="xmlns" _xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Grantee xsi:type="CanonicalUser"><EmailAddress>foo@example.com</EmailAddress></Grantee></Grant>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService16ProtocolTestGreedyKeysCase1(t *testing.T) {
+ svc := NewInputService16ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService16TestShapeInputShape{
+ Bucket: aws.String("my/bucket"),
+ Key: aws.String("testing /123"),
+ }
+ req, _ := svc.InputService16TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ assert.Equal(t, "https://test/my%2Fbucket/testing%20/123", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService17ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase1(t *testing.T) {
+ svc := NewInputService17ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService17TestShapeInputShape{}
+ req, _ := svc.InputService17TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ assert.Equal(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService17ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase2(t *testing.T) {
+ svc := NewInputService17ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService17TestShapeInputShape{
+ Foo: aws.String(""),
+ }
+ req, _ := svc.InputService17TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ assert.Equal(t, "https://test/path?abc=mno&param-name=", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService18ProtocolTestRecursiveShapesCase1(t *testing.T) {
+ svc := NewInputService18ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService18TestShapeInputShape{
+ RecursiveStruct: &InputService18TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ }
+ req, _ := svc.InputService18TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><NoRecurse>foo</NoRecurse></RecursiveStruct></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService18ProtocolTestRecursiveShapesCase2(t *testing.T) {
+ svc := NewInputService18ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService18TestShapeInputShape{
+ RecursiveStruct: &InputService18TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService18TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ },
+ }
+ req, _ := svc.InputService18TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveStruct><NoRecurse>foo</NoRecurse></RecursiveStruct></RecursiveStruct></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService18ProtocolTestRecursiveShapesCase3(t *testing.T) {
+ svc := NewInputService18ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService18TestShapeInputShape{
+ RecursiveStruct: &InputService18TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService18TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService18TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService18TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService18TestCaseOperation3Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveStruct><RecursiveStruct><RecursiveStruct><NoRecurse>foo</NoRecurse></RecursiveStruct></RecursiveStruct></RecursiveStruct></RecursiveStruct></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService18ProtocolTestRecursiveShapesCase4(t *testing.T) {
+ svc := NewInputService18ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService18TestShapeInputShape{
+ RecursiveStruct: &InputService18TestShapeRecursiveStructType{
+ RecursiveList: []*InputService18TestShapeRecursiveStructType{
+ {
+ NoRecurse: aws.String("foo"),
+ },
+ {
+ NoRecurse: aws.String("bar"),
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService18TestCaseOperation4Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveList><member><NoRecurse>foo</NoRecurse></member><member><NoRecurse>bar</NoRecurse></member></RecursiveList></RecursiveStruct></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService18ProtocolTestRecursiveShapesCase5(t *testing.T) {
+ svc := NewInputService18ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService18TestShapeInputShape{
+ RecursiveStruct: &InputService18TestShapeRecursiveStructType{
+ RecursiveList: []*InputService18TestShapeRecursiveStructType{
+ {
+ NoRecurse: aws.String("foo"),
+ },
+ {
+ RecursiveStruct: &InputService18TestShapeRecursiveStructType{
+ NoRecurse: aws.String("bar"),
+ },
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService18TestCaseOperation5Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveList><member><NoRecurse>foo</NoRecurse></member><member><RecursiveStruct><NoRecurse>bar</NoRecurse></RecursiveStruct></member></RecursiveList></RecursiveStruct></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService18ProtocolTestRecursiveShapesCase6(t *testing.T) {
+ svc := NewInputService18ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService18TestShapeInputShape{
+ RecursiveStruct: &InputService18TestShapeRecursiveStructType{
+ RecursiveMap: map[string]*InputService18TestShapeRecursiveStructType{
+ "bar": {
+ NoRecurse: aws.String("bar"),
+ },
+ "foo": {
+ NoRecurse: aws.String("foo"),
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService18TestCaseOperation6Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, util.Trim(`<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveMap><entry><key>bar</key><value><NoRecurse>bar</NoRecurse></value></entry><entry><key>foo</key><value><NoRecurse>foo</NoRecurse></value></entry></RecursiveMap></RecursiveStruct></OperationRequest>`), util.Trim(string(body)))
+
+ // assert URL
+ assert.Equal(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService19ProtocolTestTimestampInHeaderCase1(t *testing.T) {
+ svc := NewInputService19ProtocolTest(nil)
+ svc.Endpoint = "https://test"
+
+ input := &InputService19TestShapeInputShape{
+ TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)),
+ }
+ req, _ := svc.InputService19TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ assert.Equal(t, "https://test/path", r.URL.String())
+
+ // assert headers
+ assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg"))
+
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/restxml.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/restxml.go
new file mode 100644
index 0000000..d6cbff4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/restxml.go
@@ -0,0 +1,55 @@
+// Package restxml provides RESTful XML serialisation of AWS
+// requests and responses.
+package restxml
+
+//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/rest-xml.json build_test.go
+//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/rest-xml.json unmarshal_test.go
+
+import (
+ "bytes"
+ "encoding/xml"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/internal/protocol/query"
+ "github.com/aws/aws-sdk-go/internal/protocol/rest"
+ "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
+)
+
+// Build builds a request payload for the REST XML protocol.
+func Build(r *aws.Request) {
+ rest.Build(r)
+
+ if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+ var buf bytes.Buffer
+ err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to enode rest XML request", err)
+ return
+ }
+ r.SetBufferBody(buf.Bytes())
+ }
+}
+
+// Unmarshal unmarshals a payload response for the REST XML protocol.
+func Unmarshal(r *aws.Request) {
+ if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+ defer r.HTTPResponse.Body.Close()
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err)
+ return
+ }
+ }
+}
+
+// UnmarshalMeta unmarshals response headers for the REST XML protocol.
+func UnmarshalMeta(r *aws.Request) {
+ rest.Unmarshal(r)
+}
+
+// UnmarshalError unmarshals a response error for the REST XML protocol.
+func UnmarshalError(r *aws.Request) {
+ query.UnmarshalError(r)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/unmarshal_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/unmarshal_test.go
new file mode 100644
index 0000000..7efb93d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/unmarshal_test.go
@@ -0,0 +1,1322 @@
+package restxml_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/internal/protocol/restxml"
+ "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/internal/signer/v4"
+ "github.com/aws/aws-sdk-go/internal/util"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ bytes.Buffer // always import bytes
+var _ http.Request
+var _ json.Marshaler
+var _ time.Time
+var _ xmlutil.XMLNode
+var _ xml.Attr
+var _ = ioutil.Discard
+var _ = util.Trim("")
+var _ = url.Values{}
+var _ = io.EOF
+
+// OutputService1ProtocolTest is a client for OutputService1ProtocolTest.
+type OutputService1ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService1ProtocolTest client.
+func NewOutputService1ProtocolTest(config *aws.Config) *OutputService1ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice1protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &OutputService1ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService1TestCaseOperation1 = "OperationName"
+
+// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation.
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *aws.Request, output *OutputService1TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService1TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService1TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputShape, error) {
+ req, out := c.OutputService1TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opOutputService1TestCaseOperation2 = "OperationName"
+
+// OutputService1TestCaseOperation2Request generates a request for the OutputService1TestCaseOperation2 operation.
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2Request(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (req *aws.Request, output *OutputService1TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService1TestCaseOperation2,
+ }
+
+ if input == nil {
+ input = &OutputService1TestShapeOutputService1TestCaseOperation2Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService1TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (*OutputService1TestShapeOutputShape, error) {
+ req, out := c.OutputService1TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService1TestShapeOutputService1TestCaseOperation1Input struct {
+ metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService1TestShapeOutputService1TestCaseOperation2Input struct {
+ metadataOutputService1TestShapeOutputService1TestCaseOperation2Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService1TestShapeOutputService1TestCaseOperation2Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService1TestShapeOutputShape struct {
+ Char *string `type:"character"`
+
+ Double *float64 `type:"double"`
+
+ FalseBool *bool `type:"boolean"`
+
+ Float *float64 `type:"float"`
+
+ ImaHeader *string `location:"header" type:"string"`
+
+ ImaHeaderLocation *string `location:"header" locationName:"X-Foo" type:"string"`
+
+ Long *int64 `type:"long"`
+
+ Num *int64 `locationName:"FooNum" type:"integer"`
+
+ Str *string `type:"string"`
+
+ Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ TrueBool *bool `type:"boolean"`
+
+ metadataOutputService1TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService1TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService2ProtocolTest is a client for OutputService2ProtocolTest.
+type OutputService2ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService2ProtocolTest client.
+func NewOutputService2ProtocolTest(config *aws.Config) *OutputService2ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice2protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &OutputService2ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService2TestCaseOperation1 = "OperationName"
+
+// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation.
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *aws.Request, output *OutputService2TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService2TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService2TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputShape, error) {
+ req, out := c.OutputService2TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService2TestShapeOutputService2TestCaseOperation1Input struct {
+ metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService2TestShapeOutputShape struct {
+ Blob []byte `type:"blob"`
+
+ metadataOutputService2TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService2TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService3ProtocolTest is a client for OutputService3ProtocolTest.
+type OutputService3ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService3ProtocolTest client.
+func NewOutputService3ProtocolTest(config *aws.Config) *OutputService3ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice3protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &OutputService3ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService3TestCaseOperation1 = "OperationName"
+
+// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation.
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *aws.Request, output *OutputService3TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService3TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService3TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputShape, error) {
+ req, out := c.OutputService3TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService3TestShapeOutputService3TestCaseOperation1Input struct {
+ metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService3TestShapeOutputShape struct {
+ ListMember []*string `type:"list"`
+
+ metadataOutputService3TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService3TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService4ProtocolTest is a client for OutputService4ProtocolTest.
+type OutputService4ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService4ProtocolTest client.
+func NewOutputService4ProtocolTest(config *aws.Config) *OutputService4ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice4protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &OutputService4ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService4TestCaseOperation1 = "OperationName"
+
+// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation.
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *aws.Request, output *OutputService4TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService4TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService4TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputShape, error) {
+ req, out := c.OutputService4TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService4TestShapeOutputService4TestCaseOperation1Input struct {
+ metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService4TestShapeOutputShape struct {
+ ListMember []*string `locationNameList:"item" type:"list"`
+
+ metadataOutputService4TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService4TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService5ProtocolTest is a client for OutputService5ProtocolTest.
+type OutputService5ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService5ProtocolTest client.
+func NewOutputService5ProtocolTest(config *aws.Config) *OutputService5ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice5protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &OutputService5ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService5TestCaseOperation1 = "OperationName"
+
+// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation.
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *aws.Request, output *OutputService5TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService5TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService5TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputShape, error) {
+ req, out := c.OutputService5TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService5TestShapeOutputService5TestCaseOperation1Input struct {
+ metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService5TestShapeOutputShape struct {
+ ListMember []*string `type:"list" flattened:"true"`
+
+ metadataOutputService5TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService5TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService6ProtocolTest is a client for OutputService6ProtocolTest.
+type OutputService6ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService6ProtocolTest client.
+func NewOutputService6ProtocolTest(config *aws.Config) *OutputService6ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice6protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &OutputService6ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService6TestCaseOperation1 = "OperationName"
+
+// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation.
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *aws.Request, output *OutputService6TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService6TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService6TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputShape, error) {
+ req, out := c.OutputService6TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService6TestShapeOutputService6TestCaseOperation1Input struct {
+ metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService6TestShapeOutputShape struct {
+ Map map[string]*OutputService6TestShapeSingleStructure `type:"map"`
+
+ metadataOutputService6TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService6TestShapeSingleStructure struct {
+ Foo *string `locationName:"foo" type:"string"`
+
+ metadataOutputService6TestShapeSingleStructure `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeSingleStructure struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService7ProtocolTest is a client for OutputService7ProtocolTest.
+type OutputService7ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService7ProtocolTest client.
+func NewOutputService7ProtocolTest(config *aws.Config) *OutputService7ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice7protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &OutputService7ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService7TestCaseOperation1 = "OperationName"
+
+// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation.
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *aws.Request, output *OutputService7TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService7TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService7TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputShape, error) {
+ req, out := c.OutputService7TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService7TestShapeOutputService7TestCaseOperation1Input struct {
+ metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService7TestShapeOutputShape struct {
+ Map map[string]*string `type:"map" flattened:"true"`
+
+ metadataOutputService7TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService7TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService8ProtocolTest is a client for OutputService8ProtocolTest.
+type OutputService8ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService8ProtocolTest client.
+func NewOutputService8ProtocolTest(config *aws.Config) *OutputService8ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice8protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &OutputService8ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService8TestCaseOperation1 = "OperationName"
+
+// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation.
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *aws.Request, output *OutputService8TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService8TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService8TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputShape, error) {
+ req, out := c.OutputService8TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService8TestShapeOutputService8TestCaseOperation1Input struct {
+ metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService8TestShapeOutputShape struct {
+ Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map"`
+
+ metadataOutputService8TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService9ProtocolTest is a client for OutputService9ProtocolTest.
+type OutputService9ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService9ProtocolTest client.
+func NewOutputService9ProtocolTest(config *aws.Config) *OutputService9ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice9protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &OutputService9ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService9TestCaseOperation1 = "OperationName"
+
+// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation.
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *aws.Request, output *OutputService9TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService9TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService9TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputShape, error) {
+ req, out := c.OutputService9TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService9TestShapeOutputService9TestCaseOperation1Input struct {
+ metadataOutputService9TestShapeOutputService9TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeOutputService9TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService9TestShapeOutputShape struct {
+ Data *OutputService9TestShapeSingleStructure `type:"structure"`
+
+ Header *string `location:"header" locationName:"X-Foo" type:"string"`
+
+ metadataOutputService9TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure" payload:"Data"`
+}
+
+type OutputService9TestShapeSingleStructure struct {
+ Foo *string `type:"string"`
+
+ metadataOutputService9TestShapeSingleStructure `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeSingleStructure struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService10ProtocolTest is a client for OutputService10ProtocolTest.
+type OutputService10ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService10ProtocolTest client.
+func NewOutputService10ProtocolTest(config *aws.Config) *OutputService10ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice10protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &OutputService10ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService10TestCaseOperation1 = "OperationName"
+
+// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation.
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *aws.Request, output *OutputService10TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService10TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService10TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputShape, error) {
+ req, out := c.OutputService10TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService10TestShapeOutputService10TestCaseOperation1Input struct {
+ metadataOutputService10TestShapeOutputService10TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService10TestShapeOutputService10TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService10TestShapeOutputShape struct {
+ Stream []byte `type:"blob"`
+
+ metadataOutputService10TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService10TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure" payload:"Stream"`
+}
+
+// OutputService11ProtocolTest is a client for OutputService11ProtocolTest.
+type OutputService11ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService11ProtocolTest client.
+func NewOutputService11ProtocolTest(config *aws.Config) *OutputService11ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice11protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &OutputService11ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService11TestCaseOperation1 = "OperationName"
+
+// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation.
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *aws.Request, output *OutputService11TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService11TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService11TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputShape, error) {
+ req, out := c.OutputService11TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService11TestShapeOutputService11TestCaseOperation1Input struct {
+ metadataOutputService11TestShapeOutputService11TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService11TestShapeOutputService11TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService11TestShapeOutputShape struct {
+ Char *string `location:"header" locationName:"x-char" type:"character"`
+
+ Double *float64 `location:"header" locationName:"x-double" type:"double"`
+
+ FalseBool *bool `location:"header" locationName:"x-false-bool" type:"boolean"`
+
+ Float *float64 `location:"header" locationName:"x-float" type:"float"`
+
+ Integer *int64 `location:"header" locationName:"x-int" type:"integer"`
+
+ Long *int64 `location:"header" locationName:"x-long" type:"long"`
+
+ Str *string `location:"header" locationName:"x-str" type:"string"`
+
+ Timestamp *time.Time `location:"header" locationName:"x-timestamp" type:"timestamp" timestampFormat:"iso8601"`
+
+ TrueBool *bool `location:"header" locationName:"x-true-bool" type:"boolean"`
+
+ metadataOutputService11TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService11TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// OutputService12ProtocolTest is a client for OutputService12ProtocolTest.
+type OutputService12ProtocolTest struct {
+ *aws.Service
+}
+
+// New returns a new OutputService12ProtocolTest client.
+func NewOutputService12ProtocolTest(config *aws.Config) *OutputService12ProtocolTest {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config),
+ ServiceName: "outputservice12protocoltest",
+ APIVersion: "",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign)
+ service.Handlers.Build.PushBack(restxml.Build)
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return &OutputService12ProtocolTest{service}
+}
+
+// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService12ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ return req
+}
+
+const opOutputService12TestCaseOperation1 = "OperationName"
+
+// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation.
+func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *aws.Request, output *OutputService12TestShapeOutputShape) {
+ op := &aws.Operation{
+ Name: opOutputService12TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService12TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputShape, error) {
+ req, out := c.OutputService12TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService12TestShapeOutputService12TestCaseOperation1Input struct {
+ metadataOutputService12TestShapeOutputService12TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService12TestShapeOutputService12TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService12TestShapeOutputShape struct {
+ String *string `type:"string"`
+
+ metadataOutputService12TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService12TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure" payload:"String"`
+}
+
+//
+// Tests begin here
+//
+
+func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
+ svc := NewOutputService1ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Z"))
+ req, out := svc.OutputService1TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+ req.HTTPResponse.Header.Set("ImaHeader", "test")
+ req.HTTPResponse.Header.Set("X-Foo", "abc")
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "a", *out.Char)
+ assert.Equal(t, 1.3, *out.Double)
+ assert.Equal(t, false, *out.FalseBool)
+ assert.Equal(t, 1.2, *out.Float)
+ assert.Equal(t, "test", *out.ImaHeader)
+ assert.Equal(t, "abc", *out.ImaHeaderLocation)
+ assert.Equal(t, int64(200), *out.Long)
+ assert.Equal(t, int64(123), *out.Num)
+ assert.Equal(t, "myname", *out.Str)
+ assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
+ assert.Equal(t, true, *out.TrueBool)
+
+}
+
+func TestOutputService1ProtocolTestScalarMembersCase2(t *testing.T) {
+ svc := NewOutputService1ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("123falsetrue1.21.3200a2015-01-25T08:00:00Z"))
+ req, out := svc.OutputService1TestCaseOperation2Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+ req.HTTPResponse.Header.Set("ImaHeader", "test")
+ req.HTTPResponse.Header.Set("X-Foo", "abc")
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "a", *out.Char)
+ assert.Equal(t, 1.3, *out.Double)
+ assert.Equal(t, false, *out.FalseBool)
+ assert.Equal(t, 1.2, *out.Float)
+ assert.Equal(t, "test", *out.ImaHeader)
+ assert.Equal(t, "abc", *out.ImaHeaderLocation)
+ assert.Equal(t, int64(200), *out.Long)
+ assert.Equal(t, int64(123), *out.Num)
+ assert.Equal(t, "", *out.Str)
+ assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
+ assert.Equal(t, true, *out.TrueBool)
+
+}
+
+func TestOutputService2ProtocolTestBlobCase1(t *testing.T) {
+ svc := NewOutputService2ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("dmFsdWU="))
+ req, out := svc.OutputService2TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "value", string(out.Blob))
+
+}
+
+func TestOutputService3ProtocolTestListsCase1(t *testing.T) {
+ svc := NewOutputService3ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("abc123"))
+ req, out := svc.OutputService3TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) {
+ svc := NewOutputService4ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("- abc\n- 123\n"))
+ req, out := svc.OutputService4TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) {
+ svc := NewOutputService5ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("abc123"))
+ req, out := svc.OutputService5TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) {
+ svc := NewOutputService6ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte(""))
+ req, out := svc.OutputService6TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"].Foo)
+ assert.Equal(t, "bar", *out.Map["qux"].Foo)
+
+}
+
+func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) {
+ svc := NewOutputService7ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte(""))
+ req, out := svc.OutputService7TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"])
+ assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) {
+ svc := NewOutputService8ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte(""))
+ req, out := svc.OutputService8TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"])
+ assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService9ProtocolTestXMLPayloadCase1(t *testing.T) {
+ svc := NewOutputService9ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("abc"))
+ req, out := svc.OutputService9TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+ req.HTTPResponse.Header.Set("X-Foo", "baz")
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.Data.Foo)
+ assert.Equal(t, "baz", *out.Header)
+
+}
+
+func TestOutputService10ProtocolTestStreamingPayloadCase1(t *testing.T) {
+ svc := NewOutputService10ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("abc"))
+ req, out := svc.OutputService10TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", string(out.Stream))
+
+}
+
+func TestOutputService11ProtocolTestScalarMembersInHeadersCase1(t *testing.T) {
+ svc := NewOutputService11ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte(""))
+ req, out := svc.OutputService11TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+ req.HTTPResponse.Header.Set("x-char", "a")
+ req.HTTPResponse.Header.Set("x-double", "1.5")
+ req.HTTPResponse.Header.Set("x-false-bool", "false")
+ req.HTTPResponse.Header.Set("x-float", "1.5")
+ req.HTTPResponse.Header.Set("x-int", "1")
+ req.HTTPResponse.Header.Set("x-long", "100")
+ req.HTTPResponse.Header.Set("x-str", "string")
+ req.HTTPResponse.Header.Set("x-timestamp", "Sun, 25 Jan 2015 08:00:00 GMT")
+ req.HTTPResponse.Header.Set("x-true-bool", "true")
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "a", *out.Char)
+ assert.Equal(t, 1.5, *out.Double)
+ assert.Equal(t, false, *out.FalseBool)
+ assert.Equal(t, 1.5, *out.Float)
+ assert.Equal(t, int64(1), *out.Integer)
+ assert.Equal(t, int64(100), *out.Long)
+ assert.Equal(t, "string", *out.Str)
+ assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
+ assert.Equal(t, true, *out.TrueBool)
+
+}
+
+func TestOutputService12ProtocolTestStringCase1(t *testing.T) {
+ svc := NewOutputService12ProtocolTest(nil)
+
+ buf := bytes.NewReader([]byte("operation result string"))
+ req, out := svc.OutputService12TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "operation result string", *out.String)
+
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/build.go
new file mode 100644
index 0000000..d3db250
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/build.go
@@ -0,0 +1,287 @@
+// Package xmlutil provides XML serialisation of AWS requests and responses.
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// BuildXML will serialize params into an xml.Encoder.
+// Error will be returned if the serialization of any of the params or nested values fails.
+func BuildXML(params interface{}, e *xml.Encoder) error {
+ b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+ root := NewXMLElement(xml.Name{})
+ if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+ return err
+ }
+ for _, c := range root.Children {
+ for _, v := range c {
+ return StructToXML(e, v, false)
+ }
+ }
+ return nil
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+// A xmlBuilder serializes values from Go code to XML
+type xmlBuilder struct {
+ encoder *xml.Encoder
+ namespaces map[string]string
+}
+
+// buildValue generic XMLNode builder for any type. Will build value for their specific type
+// struct, list, map, scalar.
+//
+// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If
+// type is not provided reflect will be used to determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ value = elemOf(value)
+ if !value.IsValid() { // no need to handle zero values
+ return nil
+ } else if tag.Get("location") != "" { // don't handle non-body location values
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := value.Type().FieldByName("SDKShapeTraits"); ok {
+ tag = tag + reflect.StructTag(" ") + field.Tag
+ }
+ return b.buildStruct(value, current, tag)
+ case "list":
+ return b.buildList(value, current, tag)
+ case "map":
+ return b.buildMap(value, current, tag)
+ default:
+ return b.buildScalar(value, current, tag)
+ }
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested
+// types are converted to XMLNodes also.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ fieldAdded := false
+
+ // unwrap payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := value.Type().FieldByName(payload)
+ tag = field.Tag
+ value = elemOf(value.FieldByName(payload))
+
+ if !value.IsValid() {
+ return nil
+ }
+ }
+
+ child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+ // there is an xmlNamespace associated with this struct
+ if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+ ns := xml.Attr{
+ Name: xml.Name{Local: "xmlns"},
+ Value: uri,
+ }
+ if prefix != "" {
+ b.namespaces[prefix] = uri // register the namespace
+ ns.Name.Local = "xmlns:" + prefix
+ }
+
+ child.Attr = append(child.Attr, ns)
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c {
+ continue // ignore unexported fields
+ }
+
+ member := elemOf(value.Field(i))
+ field := t.Field(i)
+ mTag := field.Tag
+
+ if mTag.Get("location") != "" { // skip non-body members
+ continue
+ }
+
+ memberName := mTag.Get("locationName")
+ if memberName == "" {
+ memberName = field.Name
+ mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+ }
+ if err := b.buildValue(member, child, mTag); err != nil {
+ return err
+ }
+
+ fieldAdded = true
+ }
+
+ if fieldAdded { // only append this child if we have one ore more valid members
+ current.AddChild(child)
+ }
+
+ return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as children nodes. All
+// nested values in the list are converted to XMLNodes also.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted lists
+ return nil
+ }
+
+ // check for unflattened list member
+ flattened := tag.Get("flattened") != ""
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if flattened {
+ for i := 0; i < value.Len(); i++ {
+ child := NewXMLElement(xname)
+ current.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ } else {
+ list := NewXMLElement(xname)
+ current.AddChild(list)
+
+ for i := 0; i < value.Len(); i++ {
+ iname := tag.Get("locationNameList")
+ if iname == "" {
+ iname = "member"
+ }
+
+ child := NewXMLElement(xml.Name{Local: iname})
+ list.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
+// nested values in the map are converted to XMLNodes also.
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted maps
+ return nil
+ }
+
+ maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+ current.AddChild(maproot)
+ current = maproot
+
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ // sorting is not required for compliance, but it makes testing easier
+ keys := make([]string, value.Len())
+ for i, k := range value.MapKeys() {
+ keys[i] = k.String()
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := value.MapIndex(reflect.ValueOf(k))
+
+ mapcur := current
+ if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+ child := NewXMLElement(xml.Name{Local: "entry"})
+ mapcur.AddChild(child)
+ mapcur = child
+ }
+
+ kchild := NewXMLElement(xml.Name{Local: kname})
+ kchild.Text = k
+ vchild := NewXMLElement(xml.Name{Local: vname})
+ mapcur.AddChild(kchild)
+ mapcur.AddChild(vchild)
+
+ if err := b.buildValue(v, vchild, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// buildScalar will convert the value into a string and append it as a attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value.
+//
+// Error will be returned if the value type is unsupported.
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ var str string
+ switch converted := value.Interface().(type) {
+ case string:
+ str = converted
+ case []byte:
+ if !value.IsNil() {
+ str = base64.StdEncoding.EncodeToString(converted)
+ }
+ case bool:
+ str = strconv.FormatBool(converted)
+ case int64:
+ str = strconv.FormatInt(converted, 10)
+ case int:
+ str = strconv.Itoa(converted)
+ case float64:
+ str = strconv.FormatFloat(converted, 'f', -1, 64)
+ case float32:
+ str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ str = converted.UTC().Format(ISO8601UTC)
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)",
+ tag.Get("locationName"), value.Interface(), value.Type().Name())
+ }
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+ attr := xml.Attr{Name: xname, Value: str}
+ current.Attr = append(current.Attr, attr)
+ } else { // regular text node
+ current.AddChild(&XMLNode{Name: xname, Text: str})
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go
new file mode 100644
index 0000000..5e4fe21
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,260 @@
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// UnmarshalXML deserializes an xml.Decoder into the container v. V
+// needs to match the shape of the XML expected to be decoded.
+// If the shape doesn't match unmarshaling will fail.
+//
+// Fix: the error from XMLToStruct was previously discarded, silently
+// accepting malformed XML; it is now propagated. The redundant nil-check
+// on n.Children (ranging a nil map is a no-op) was also removed.
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
+	n, err := XMLToStruct(d, nil)
+	if err != nil {
+		return err
+	}
+	for _, root := range n.Children {
+		for _, c := range root {
+			if wrappedChild, ok := c.Children[wrapper]; ok {
+				c = wrappedChild[0] // pull out wrapped element
+			}
+
+			if err := parse(reflect.ValueOf(v), c, ""); err != nil {
+				if err == io.EOF {
+					// EOF while parsing means the document ended cleanly.
+					return nil
+				}
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect
+// will be used to determine the type from r.
+func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ rtype := r.Type()
+ if rtype.Kind() == reflect.Ptr {
+ rtype = rtype.Elem() // check kind of actual element type
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ // No explicit protocol type tag: infer the wire shape from the Go kind.
+ switch rtype.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ // SDKShapeTraits carries the struct-level protocol tags (payload, etc.).
+ if field, ok := rtype.FieldByName("SDKShapeTraits"); ok {
+ tag = field.Tag
+ }
+ return parseStruct(r, node, tag)
+ case "list":
+ return parseList(r, node, tag)
+ case "map":
+ return parseMap(r, node, tag)
+ default:
+ // Anything else is treated as a scalar leaf value.
+ return parseScalar(r, node, tag)
+ }
+}
+
+// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
+// types in the structure will also be deserialized.
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+ if r.Kind() == reflect.Ptr {
+ if r.IsNil() { // create the structure if it's nil
+ s := reflect.New(r.Type().Elem())
+ r.Set(s)
+ r = s
+ }
+
+ r = r.Elem()
+ t = t.Elem()
+ }
+
+ // unwrap any payloads: the whole node deserializes into a single field.
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := t.FieldByName(payload)
+ return parseStruct(r.FieldByName(payload), node, field.Tag)
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if c := field.Name[0:1]; strings.ToLower(c) == c {
+ continue // ignore unexported fields
+ }
+
+ // figure out what this field is called on the wire
+ name := field.Name
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ // try to find the field by name in elements
+ elems := node.Children[name]
+
+ if elems == nil { // try to find the field in attributes
+ for _, a := range node.Attr {
+ if name == a.Name.Local {
+ // turn this into a text node for de-serializing
+ elems = []*XMLNode{{Text: a.Value}}
+ }
+ }
+ }
+
+ // A field with no matching element or attribute is simply left untouched.
+ member := r.FieldByName(field.Name)
+ for _, elem := range elems {
+ err := parse(member, elem, field.Tag)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+
+	if tag.Get("flattened") != "" {
+		// A flattened list means this node is itself a single list element:
+		// append a zero element and parse the node into it.
+		if r.IsNil() {
+			r.Set(reflect.MakeSlice(t, 0, 0))
+		}
+
+		r.Set(reflect.Append(r, reflect.Zero(t.Elem())))
+		return parse(r.Index(r.Len()-1), node, "")
+	}
+
+	// Non-flattened: each entry lives under a "member" (or custom-named)
+	// child element.
+	memberName := "member"
+	if name := tag.Get("locationNameList"); name != "" {
+		memberName = name
+	}
+
+	entries, ok := node.Children[memberName]
+	if !ok {
+		return nil
+	}
+
+	if r.IsNil() {
+		r.Set(reflect.MakeSlice(t, len(entries), len(entries)))
+	}
+	for i, entry := range entries {
+		if err := parse(r.Index(i), entry, ""); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+//
+// Fix: errors returned by parseMapEntry were previously discarded; they are
+// now propagated to the caller.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	if r.IsNil() {
+		r.Set(reflect.MakeMap(r.Type()))
+	}
+
+	if tag.Get("flattened") == "" { // look at all child entries
+		for _, entry := range node.Children["entry"] {
+			if err := parseMapEntry(r, entry, tag); err != nil {
+				return err
+			}
+		}
+	} else { // this element is itself an entry
+		if err := parseMapEntry(r, node, tag); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMapEntry deserializes a map entry from a XML node.
+//
+// Key/value element names default to "key"/"value" and may be overridden
+// with the locationNameKey/locationNameValue struct tags.
+//
+// Fixes: the error from parse was previously discarded, and malformed XML
+// with more keys than values caused an index-out-of-range panic; both now
+// surface as errors.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	keys, ok := node.Children[kname]
+	if !ok {
+		return nil
+	}
+	values := node.Children[vname]
+	for i, key := range keys {
+		if i >= len(values) {
+			return fmt.Errorf("missing value for map key %q", key.Text)
+		}
+
+		keyR := reflect.ValueOf(key.Text)
+		valueR := reflect.New(r.Type().Elem()).Elem()
+
+		if err := parse(valueR, values[i], ""); err != nil {
+			return err
+		}
+		r.SetMapIndex(keyR, valueR)
+	}
+	return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ switch r.Interface().(type) {
+ case *string:
+ // Strings point directly at the node's text; no conversion needed.
+ r.Set(reflect.ValueOf(&node.Text))
+ return nil
+ case []byte:
+ // Binary blobs are base64-encoded on the wire.
+ b, err := base64.StdEncoding.DecodeString(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(b))
+ case *bool:
+ v, err := strconv.ParseBool(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *int64:
+ v, err := strconv.ParseInt(node.Text, 10, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *float64:
+ v, err := strconv.ParseFloat(node.Text, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *time.Time:
+ // Timestamps are ISO8601 UTC with second precision (matches buildScalar).
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ t, err := time.Parse(ISO8601UTC, node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&t))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 0000000..72c198a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,105 @@
+package xmlutil
+
+import (
+ "encoding/xml"
+ "io"
+ "sort"
+)
+
+// A XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+ Name xml.Name `json:",omitempty"` // element name
+ Children map[string][]*XMLNode `json:",omitempty"` // child elements grouped by local name
+ Text string `json:",omitempty"` // character data of the element
+ Attr []xml.Attr `json:",omitempty"` // element attributes
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+ return &XMLNode{
+ Name: name,
+ Children: map[string][]*XMLNode{},
+ Attr: []xml.Attr{},
+ }
+}
+
+// AddChild adds child to the XMLNode, grouped under the child's local name.
+func (n *XMLNode) AddChild(child *XMLNode) {
+	// append handles a nil slice value, so no explicit initialization of the
+	// map entry is required.
+	name := child.Name.Local
+	n.Children[name] = append(n.Children[name], child)
+}
+
+// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values.
+//
+// It recurses on each StartElement and returns when the matching EndElement
+// for s is consumed (or the stream ends).
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
+ out := &XMLNode{}
+ for {
+ tok, err := d.Token()
+ if tok == nil || err == io.EOF {
+ break
+ }
+ if err != nil {
+ return out, err
+ }
+
+ switch typed := tok.(type) {
+ case xml.CharData:
+ // Copy: the decoder reuses its internal buffer between tokens.
+ out.Text = string(typed.Copy())
+ case xml.StartElement:
+ el := typed.Copy()
+ out.Attr = el.Attr
+ if out.Children == nil {
+ out.Children = map[string][]*XMLNode{}
+ }
+
+ name := typed.Name.Local
+ slice := out.Children[name]
+ if slice == nil {
+ slice = []*XMLNode{}
+ }
+ // Recurse to build the subtree for this element.
+ node, e := XMLToStruct(d, &el)
+ if e != nil {
+ return out, e
+ }
+ node.Name = typed.Name
+ slice = append(slice, node)
+ out.Children[name] = slice
+ case xml.EndElement:
+ if s != nil && s.Name.Local == typed.Name.Local { // matching end token
+ return out, nil
+ }
+ }
+ }
+ return out, nil
+}
+
+// StructToXML writes an XMLNode to a xml.Encoder as tokens.
+//
+// When sorted is true children are emitted in sorted name order so the
+// output is deterministic (required for signed request bodies).
+//
+// Fix: EncodeToken and recursive StructToXML errors were previously
+// ignored; they are now propagated.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+	if err := e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}); err != nil {
+		return err
+	}
+
+	if node.Text != "" {
+		if err := e.EncodeToken(xml.CharData([]byte(node.Text))); err != nil {
+			return err
+		}
+	} else if sorted {
+		sortedNames := make([]string, 0, len(node.Children))
+		for k := range node.Children {
+			sortedNames = append(sortedNames, k)
+		}
+		sort.Strings(sortedNames)
+
+		for _, k := range sortedNames {
+			for _, v := range node.Children[k] {
+				if err := StructToXML(e, v, sorted); err != nil {
+					return err
+				}
+			}
+		}
+	} else {
+		for _, c := range node.Children {
+			for _, v := range c {
+				if err := StructToXML(e, v, sorted); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	if err := e.EncodeToken(xml.EndElement{Name: node.Name}); err != nil {
+		return err
+	}
+	return e.Flush()
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/functional_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/functional_test.go
new file mode 100644
index 0000000..fbb0e41
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/functional_test.go
@@ -0,0 +1,43 @@
+package v4_test
+
+import (
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/internal/test/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ = unit.Imported
+
+// TestPresignHandler verifies that presigning an S3 PutObject request at a
+// fixed time produces the expected signature, credential scope, signed
+// headers, date, and expiry query parameters, and that '+' is percent-encoded.
+func TestPresignHandler(t *testing.T) {
+ svc := s3.New(nil)
+ req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
+ Bucket: aws.String("bucket"),
+ Key: aws.String("key"),
+ ContentDisposition: aws.String("a+b c$d"),
+ ACL: aws.String("public-read"),
+ })
+ req.Time = time.Unix(0, 0)
+ urlstr, err := req.Presign(5 * time.Minute)
+
+ assert.NoError(t, err)
+
+ expectedDate := "19700101T000000Z"
+ expectedHeaders := "host;x-amz-acl"
+ expectedSig := "7edcb4e3a1bf12f4989018d75acbe3a7f03df24bd6f3112602d59fc551f0e4e2"
+ expectedCred := "AKID/19700101/mock-region/s3/aws4_request"
+
+ u, _ := url.Parse(urlstr)
+ urlQ := u.Query()
+ assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature"))
+ assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential"))
+ assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders"))
+ assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date"))
+ assert.Equal(t, "300", urlQ.Get("X-Amz-Expires"))
+
+ assert.NotContains(t, urlstr, "+") // + encoded as %20
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go
new file mode 100644
index 0000000..6fef0d6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go
@@ -0,0 +1,360 @@
+// Package v4 implements signing for AWS V4 signer
+package v4
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/internal/protocol/rest"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+const (
+ authHeaderPrefix = "AWS4-HMAC-SHA256" // SigV4 algorithm identifier
+ timeFormat = "20060102T150405Z" // full ISO8601 basic timestamp
+ shortTimeFormat = "20060102" // date-only scope component
+)
+
+// ignoredHeaders are never included in the canonical/signed header set.
+var ignoredHeaders = map[string]bool{
+ "Authorization": true,
+ "Content-Type": true,
+ "Content-Length": true,
+ "User-Agent": true,
+}
+
+// signer holds all inputs and intermediate state for a single SigV4
+// signing pass over one HTTP request.
+type signer struct {
+ Request *http.Request
+ Time time.Time
+ ExpireTime time.Duration
+ ServiceName string
+ Region string
+ CredValues credentials.Value
+ Credentials *credentials.Credentials
+ Query url.Values
+ Body io.ReadSeeker
+ Debug uint
+ Logger io.Writer
+
+ isPresign bool
+ formattedTime string
+ formattedShortTime string
+
+ // Intermediate values built up by build(); kept for debug logging.
+ signedHeaders string
+ canonicalHeaders string
+ canonicalString string
+ credentialString string
+ stringToSign string
+ signature string
+ authorization string
+}
+
+// Sign requests with signature version 4.
+//
+// Will sign the requests with the service config's Credentials object
+// Signing is skipped if the credentials is the credentials.AnonymousCredentials
+// object.
+func Sign(req *aws.Request) {
+ // If the request does not need to be signed ignore the signing of the
+ // request if the AnonymousCredentials object is used.
+ if req.Service.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ // Service-specific signing region/name override the config defaults.
+ region := req.Service.SigningRegion
+ if region == "" {
+ region = req.Service.Config.Region
+ }
+
+ name := req.Service.SigningName
+ if name == "" {
+ name = req.Service.ServiceName
+ }
+
+ s := signer{
+ Request: req.HTTPRequest,
+ Time: req.Time,
+ ExpireTime: req.ExpireTime,
+ Query: req.HTTPRequest.URL.Query(),
+ Body: req.Body,
+ ServiceName: name,
+ Region: region,
+ Credentials: req.Service.Config.Credentials,
+ Debug: req.Service.Config.LogLevel,
+ Logger: req.Service.Config.Logger,
+ }
+
+ req.Error = s.sign()
+}
+
+// sign performs the signing pass: resolves credentials, decides between
+// header signing and presigning (ExpireTime != 0), and delegates to build().
+// Already-signed requests with unexpired credentials are left untouched.
+func (v4 *signer) sign() error {
+ if v4.ExpireTime != 0 {
+ v4.isPresign = true
+ }
+
+ if v4.isRequestSigned() {
+ if !v4.Credentials.IsExpired() {
+ // If the request is already signed, and the credentials have not
+ // expired yet ignore the signing request.
+ return nil
+ }
+
+ // The credentials have expired for this request. The current signing
+ // is invalid, and needs to be redone because the request would fail.
+ if v4.isPresign {
+ v4.removePresign()
+ // Update the request's query string to ensure the values stays in
+ // sync in the case retrieving the new credentials fails.
+ v4.Request.URL.RawQuery = v4.Query.Encode()
+ }
+ }
+
+ var err error
+ v4.CredValues, err = v4.Credentials.Get()
+ if err != nil {
+ return err
+ }
+
+ if v4.isPresign {
+ v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+ // Session tokens ride in the query for presigned URLs, in a header
+ // for normal signed requests.
+ if v4.CredValues.SessionToken != "" {
+ v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
+ } else {
+ v4.Query.Del("X-Amz-Security-Token")
+ }
+ } else if v4.CredValues.SessionToken != "" {
+ v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
+ }
+
+ v4.build()
+
+ if v4.Debug > 0 {
+ v4.logSigningInfo()
+ }
+
+ return nil
+}
+
+// logSigningInfo dumps the canonical string, string to sign, and (for
+// presigned requests) the final URL to the configured Logger for debugging.
+func (v4 *signer) logSigningInfo() {
+ out := v4.Logger
+ fmt.Fprintf(out, "---[ CANONICAL STRING ]-----------------------------\n")
+ fmt.Fprintln(out, v4.canonicalString)
+ fmt.Fprintf(out, "---[ STRING TO SIGN ]--------------------------------\n")
+ fmt.Fprintln(out, v4.stringToSign)
+ if v4.isPresign {
+ fmt.Fprintf(out, "---[ SIGNED URL ]--------------------------------\n")
+ fmt.Fprintln(out, v4.Request.URL)
+ }
+ fmt.Fprintf(out, "-----------------------------------------------------\n")
+}
+
+// build runs the SigV4 pipeline in dependency order and attaches the result
+// either as an X-Amz-Signature query parameter (presign) or an
+// Authorization header. The step order below must not be changed.
+func (v4 *signer) build() {
+
+ v4.buildTime() // no depends
+ v4.buildCredentialString() // no depends
+ if v4.isPresign {
+ v4.buildQuery() // no depends
+ }
+ v4.buildCanonicalHeaders() // depends on cred string
+ v4.buildCanonicalString() // depends on canon headers / signed headers
+ v4.buildStringToSign() // depends on canon string
+ v4.buildSignature() // depends on string to sign
+
+ if v4.isPresign {
+ v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
+ } else {
+ parts := []string{
+ authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString,
+ "SignedHeaders=" + v4.signedHeaders,
+ "Signature=" + v4.signature,
+ }
+ v4.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+ }
+}
+
+// buildTime formats the signing time (UTC) and places it either in the
+// query (presign, together with the expiry in seconds) or the X-Amz-Date
+// header (normal signing).
+func (v4 *signer) buildTime() {
+ v4.formattedTime = v4.Time.UTC().Format(timeFormat)
+ v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat)
+
+ if v4.isPresign {
+ duration := int64(v4.ExpireTime / time.Second)
+ v4.Query.Set("X-Amz-Date", v4.formattedTime)
+ v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
+ } else {
+ v4.Request.Header.Set("X-Amz-Date", v4.formattedTime)
+ }
+}
+
+// buildCredentialString assembles the SigV4 credential scope
+// (date/region/service/aws4_request) and, for presigned requests, exposes
+// it as the X-Amz-Credential query parameter.
+func (v4 *signer) buildCredentialString() {
+ v4.credentialString = strings.Join([]string{
+ v4.formattedShortTime,
+ v4.Region,
+ v4.ServiceName,
+ "aws4_request",
+ }, "/")
+
+ if v4.isPresign {
+ v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString)
+ }
+}
+
+// buildQuery hoists non-ignored, non-X-Amz-* headers into the query string
+// for presigned requests, removing them from the header set.
+func (v4 *signer) buildQuery() {
+ for k, h := range v4.Request.Header {
+ if strings.HasPrefix(http.CanonicalHeaderKey(k), "X-Amz-") {
+ continue // never hoist x-amz-* headers, they must be signed
+ }
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // never hoist ignored headers
+ }
+
+ v4.Request.Header.Del(k)
+ v4.Query.Del(k)
+ for _, v := range h {
+ v4.Query.Add(k, v)
+ }
+ }
+}
+
+// buildCanonicalHeaders computes the sorted, lowercased signed-header list
+// and the newline-joined canonical header block ("name:value"), always
+// including the Host header.
+func (v4 *signer) buildCanonicalHeaders() {
+ var headers []string
+ headers = append(headers, "host")
+ for k := range v4.Request.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // ignored header
+ }
+ headers = append(headers, strings.ToLower(k))
+ }
+ sort.Strings(headers)
+
+ v4.signedHeaders = strings.Join(headers, ";")
+
+ if v4.isPresign {
+ v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders)
+ }
+
+ headerValues := make([]string, len(headers))
+ for i, k := range headers {
+ if k == "host" {
+ // Host is taken from the URL, not the header map.
+ headerValues[i] = "host:" + v4.Request.URL.Host
+ } else {
+ // Multi-valued headers are comma-joined per the SigV4 spec.
+ headerValues[i] = k + ":" +
+ strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",")
+ }
+ }
+
+ v4.canonicalHeaders = strings.Join(headerValues, "\n")
+}
+
+// buildCanonicalString assembles the SigV4 canonical request:
+// method, URI, query, canonical headers, signed headers, and body digest,
+// newline-joined. '+' in the query is re-encoded as %20 as required.
+func (v4 *signer) buildCanonicalString() {
+ v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1)
+ uri := v4.Request.URL.Opaque
+ if uri != "" {
+ // Opaque URLs look like "//host/path..."; drop the host portion.
+ uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
+ } else {
+ uri = v4.Request.URL.Path
+ }
+ if uri == "" {
+ uri = "/"
+ }
+
+ // S3 paths are signed without additional escaping.
+ if v4.ServiceName != "s3" {
+ uri = rest.EscapePath(uri, false)
+ }
+
+ v4.canonicalString = strings.Join([]string{
+ v4.Request.Method,
+ uri,
+ v4.Request.URL.RawQuery,
+ v4.canonicalHeaders + "\n",
+ v4.signedHeaders,
+ v4.bodyDigest(),
+ }, "\n")
+}
+
+// buildStringToSign produces the final string to sign: algorithm, timestamp,
+// credential scope, and the hex SHA-256 of the canonical request.
+func (v4 *signer) buildStringToSign() {
+ v4.stringToSign = strings.Join([]string{
+ authHeaderPrefix,
+ v4.formattedTime,
+ v4.credentialString,
+ hex.EncodeToString(makeSha256([]byte(v4.canonicalString))),
+ }, "\n")
+}
+
+// buildSignature derives the SigV4 signing key by chaining HMAC-SHA256 over
+// the secret key, date, region, and service, then signs the string to sign
+// and stores the hex-encoded result.
+func (v4 *signer) buildSignature() {
+	secret := v4.CredValues.SecretAccessKey
+	date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime))
+	region := makeHmac(date, []byte(v4.Region))
+	service := makeHmac(region, []byte(v4.ServiceName))
+	// Renamed from "credentials" to avoid shadowing the imported
+	// credentials package within this function.
+	signingKey := makeHmac(service, []byte("aws4_request"))
+	signature := makeHmac(signingKey, []byte(v4.stringToSign))
+	v4.signature = hex.EncodeToString(signature)
+}
+
+// bodyDigest returns the hex SHA-256 of the request body, caching it in the
+// X-Amz-Content-Sha256 header. Presigned S3 requests use the literal
+// "UNSIGNED-PAYLOAD" sentinel; a nil body hashes as the empty string.
+func (v4 *signer) bodyDigest() string {
+ hash := v4.Request.Header.Get("X-Amz-Content-Sha256")
+ if hash == "" {
+ if v4.isPresign && v4.ServiceName == "s3" {
+ hash = "UNSIGNED-PAYLOAD"
+ } else if v4.Body == nil {
+ hash = hex.EncodeToString(makeSha256([]byte{}))
+ } else {
+ hash = hex.EncodeToString(makeSha256Reader(v4.Body))
+ }
+ v4.Request.Header.Add("X-Amz-Content-Sha256", hash)
+ }
+ return hash
+}
+
+// isRequestSigned returns if the request is currently signed or presigned.
+func (v4 *signer) isRequestSigned() bool {
+	presigned := v4.isPresign && v4.Query.Get("X-Amz-Signature") != ""
+	headerSigned := v4.Request.Header.Get("Authorization") != ""
+	return presigned || headerSigned
+}
+
+// removePresign strips every SigV4 presign parameter from the query so the
+// request can be re-signed from a clean state.
+func (v4 *signer) removePresign() {
+	for _, param := range []string{
+		"X-Amz-Algorithm",
+		"X-Amz-Signature",
+		"X-Amz-Security-Token",
+		"X-Amz-Date",
+		"X-Amz-Expires",
+		"X-Amz-Credential",
+		"X-Amz-SignedHeaders",
+	} {
+		v4.Query.Del(param)
+	}
+}
+
+// makeHmac returns the HMAC-SHA256 of data keyed with key.
+func makeHmac(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// makeSha256 returns the SHA-256 digest of data.
+func makeSha256(data []byte) []byte {
+	sum := sha256.Sum256(data)
+	return sum[:]
+}
+
+// makeSha256Reader hashes the remaining contents of reader with SHA-256,
+// restoring the reader's position afterwards so the body can still be sent.
+// NOTE(review): Seek/Copy errors are silently ignored here — presumably the
+// request fails later on a bad body; confirm before relying on it.
+func makeSha256Reader(reader io.ReadSeeker) []byte {
+ hash := sha256.New()
+ start, _ := reader.Seek(0, 1)
+ defer reader.Seek(start, 0)
+
+ io.Copy(hash, reader)
+ return hash.Sum(nil)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4_test.go
new file mode 100644
index 0000000..99966f8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4_test.go
@@ -0,0 +1,245 @@
+package v4
+
+import (
+	"net/http"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/stretchr/testify/assert"
+)
+
+// buildSigner constructs a signer populated with a canned request for the
+// given service/region/time, used as the fixture for the signing tests.
+func buildSigner(serviceName string, region string, signTime time.Time, expireTime time.Duration, body string) signer {
+	endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
+	reader := strings.NewReader(body)
+	req, _ := http.NewRequest("POST", endpoint, reader)
+	req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"
+	req.Header.Add("X-Amz-Target", "prefix.Operation")
+	req.Header.Add("Content-Type", "application/x-amz-json-1.0")
+	// Fix: string(len(body)) converted the length to a rune (e.g. 2 -> "\x02"),
+	// not a decimal string. Content-Length is an ignored header for signing,
+	// so this does not alter the expected signatures below.
+	req.Header.Add("Content-Length", strconv.Itoa(len(body)))
+	req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
+
+	return signer{
+		Request:     req,
+		Time:        signTime,
+		ExpireTime:  expireTime,
+		Query:       req.URL.Query(),
+		Body:        reader,
+		ServiceName: serviceName,
+		Region:      region,
+		Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
+	}
+}
+
+// removeWS strips all spaces, newlines, and tabs from text.
+func removeWS(text string) string {
+	return strings.NewReplacer(" ", "", "\n", "", "\t", "").Replace(text)
+}
+
+// assertEqual compares expected and given after removing all whitespace,
+// so tests can use readable multi-line literals.
+func assertEqual(t *testing.T, expected, given string) {
+ if removeWS(expected) != removeWS(given) {
+ t.Errorf("\nExpected: %s\nGiven: %s", expected, given)
+ }
+}
+
+// TestPresignRequest checks the presigned query parameters at a fixed time
+// against known-good signature values.
+func TestPresignRequest(t *testing.T) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 300*time.Second, "{}")
+ signer.sign()
+
+ expectedDate := "19700101T000000Z"
+ expectedHeaders := "host;x-amz-meta-other-header;x-amz-target"
+ expectedSig := "5eeedebf6f995145ce56daa02902d10485246d3defb34f97b973c1f40ab82d36"
+ expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
+
+ q := signer.Request.URL.Query()
+ assert.Equal(t, expectedSig, q.Get("X-Amz-Signature"))
+ assert.Equal(t, expectedCred, q.Get("X-Amz-Credential"))
+ assert.Equal(t, expectedHeaders, q.Get("X-Amz-SignedHeaders"))
+ assert.Equal(t, expectedDate, q.Get("X-Amz-Date"))
+}
+
+// TestSignRequest checks the Authorization header produced for a fixed time
+// against a known-good SigV4 signature.
+func TestSignRequest(t *testing.T) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 0, "{}")
+ signer.sign()
+
+ expectedDate := "19700101T000000Z"
+ expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=host;x-amz-date;x-amz-meta-other-header;x-amz-security-token;x-amz-target, Signature=69ada33fec48180dab153576e4dd80c4e04124f80dda3eccfed8a67c2b91ed5e"
+
+ q := signer.Request.Header
+ assert.Equal(t, expectedSig, q.Get("Authorization"))
+ assert.Equal(t, expectedDate, q.Get("X-Amz-Date"))
+}
+
+// TestSignEmptyBody verifies a nil body hashes to the SHA-256 of the empty
+// string.
+func TestSignEmptyBody(t *testing.T) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "")
+ signer.Body = nil
+ signer.sign()
+ hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
+ assert.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hash)
+}
+
+// TestSignBody verifies the body digest header is the SHA-256 of the body.
+func TestSignBody(t *testing.T) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello")
+ signer.sign()
+ hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
+ assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash)
+}
+
+// TestSignSeekedBody verifies signing hashes only the unread remainder of
+// the body and restores the reader's position afterwards.
+func TestSignSeekedBody(t *testing.T) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, " hello")
+ signer.Body.Read(make([]byte, 3)) // consume first 3 bytes so body is now "hello"
+ signer.sign()
+ hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
+ assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash)
+
+ start, _ := signer.Body.Seek(0, 1)
+ assert.Equal(t, int64(3), start)
+}
+
+// TestPresignEmptyBodyS3 verifies presigned S3 requests use the
+// UNSIGNED-PAYLOAD sentinel instead of a body hash.
+func TestPresignEmptyBodyS3(t *testing.T) {
+ signer := buildSigner("s3", "us-east-1", time.Now(), 5*time.Minute, "hello")
+ signer.sign()
+ hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
+ assert.Equal(t, "UNSIGNED-PAYLOAD", hash)
+}
+
+// TestSignPrecomputedBodyChecksum verifies an existing X-Amz-Content-Sha256
+// header is honored and not recomputed.
+func TestSignPrecomputedBodyChecksum(t *testing.T) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello")
+ signer.Request.Header.Set("X-Amz-Content-Sha256", "PRECOMPUTED")
+ signer.sign()
+ hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
+ assert.Equal(t, "PRECOMPUTED", hash)
+}
+
+// TestAnonymousCredentials verifies Sign is a no-op for requests using the
+// AnonymousCredentials sentinel: no signature data appears in query or headers.
+func TestAnonymousCredentials(t *testing.T) {
+ r := aws.NewRequest(
+ aws.NewService(&aws.Config{Credentials: credentials.AnonymousCredentials}),
+ &aws.Operation{
+ Name: "BatchGetItem",
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ },
+ nil,
+ nil,
+ )
+ Sign(r)
+
+ urlQ := r.HTTPRequest.URL.Query()
+ assert.Empty(t, urlQ.Get("X-Amz-Signature"))
+ assert.Empty(t, urlQ.Get("X-Amz-Credential"))
+ assert.Empty(t, urlQ.Get("X-Amz-SignedHeaders"))
+ assert.Empty(t, urlQ.Get("X-Amz-Date"))
+
+ hQ := r.HTTPRequest.Header
+ assert.Empty(t, hQ.Get("Authorization"))
+ assert.Empty(t, hQ.Get("X-Amz-Date"))
+}
+
+// TestIgnoreResignRequestWithValidCreds verifies an already-signed request
+// with unexpired credentials is not re-signed.
+func TestIgnoreResignRequestWithValidCreds(t *testing.T) {
+ r := aws.NewRequest(
+ aws.NewService(&aws.Config{
+ Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
+ Region: "us-west-2",
+ }),
+ &aws.Operation{
+ Name: "BatchGetItem",
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ },
+ nil,
+ nil,
+ )
+
+ Sign(r)
+ sig := r.HTTPRequest.Header.Get("Authorization")
+
+ Sign(r)
+ assert.Equal(t, sig, r.HTTPRequest.Header.Get("Authorization"))
+}
+
+// TestIgnorePreResignRequestWithValidCreds verifies an already-presigned
+// request with unexpired credentials is not re-signed.
+func TestIgnorePreResignRequestWithValidCreds(t *testing.T) {
+ r := aws.NewRequest(
+ aws.NewService(&aws.Config{
+ Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
+ Region: "us-west-2",
+ }),
+ &aws.Operation{
+ Name: "BatchGetItem",
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ },
+ nil,
+ nil,
+ )
+ r.ExpireTime = time.Minute * 10
+
+ Sign(r)
+ sig := r.HTTPRequest.Header.Get("X-Amz-Signature")
+
+ Sign(r)
+ assert.Equal(t, sig, r.HTTPRequest.Header.Get("X-Amz-Signature"))
+}
+
+// TestResignRequestExpiredCreds verifies a signed request is re-signed once
+// its credentials are expired.
+func TestResignRequestExpiredCreds(t *testing.T) {
+ creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
+ r := aws.NewRequest(
+ aws.NewService(&aws.Config{Credentials: creds}),
+ &aws.Operation{
+ Name: "BatchGetItem",
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ },
+ nil,
+ nil,
+ )
+ Sign(r)
+ querySig := r.HTTPRequest.Header.Get("Authorization")
+
+ creds.Expire()
+
+ Sign(r)
+ assert.NotEqual(t, querySig, r.HTTPRequest.Header.Get("Authorization"))
+}
+
+// TestPreResignRequestExpiredCreds verifies a presigned request is re-signed
+// once its credentials expire and the signing time changes.
+func TestPreResignRequestExpiredCreds(t *testing.T) {
+	// Use keyed composite literals so `go vet` does not flag unkeyed fields.
+	provider := &credentials.StaticProvider{Value: credentials.Value{
+		AccessKeyID:     "AKID",
+		SecretAccessKey: "SECRET",
+		SessionToken:    "SESSION",
+	}}
+	creds := credentials.NewCredentials(provider)
+	r := aws.NewRequest(
+		aws.NewService(&aws.Config{Credentials: creds}),
+		&aws.Operation{
+			Name:       "BatchGetItem",
+			HTTPMethod: "POST",
+			HTTPPath:   "/",
+		},
+		nil,
+		nil,
+	)
+	r.ExpireTime = time.Minute * 10
+
+	Sign(r)
+	querySig := r.HTTPRequest.URL.Query().Get("X-Amz-Signature")
+
+	creds.Expire()
+	r.Time = time.Now().Add(time.Hour * 48)
+
+	Sign(r)
+	assert.NotEqual(t, querySig, r.HTTPRequest.URL.Query().Get("X-Amz-Signature"))
+}
+
+// BenchmarkPresignRequest measures the cost of a full presign pass.
+func BenchmarkPresignRequest(b *testing.B) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Now(), 300*time.Second, "{}")
+ for i := 0; i < b.N; i++ {
+ signer.sign()
+ }
+}
+
+// BenchmarkSignRequest measures the cost of a full header-signing pass.
+func BenchmarkSignRequest(b *testing.B) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "{}")
+ for i := 0; i < b.N; i++ {
+ signer.sign()
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/api.go
new file mode 100644
index 0000000..e702583
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -0,0 +1,4804 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package s3 provides a client for Amazon Simple Storage Service.
+package s3
+
+import (
+ "io"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+const opAbortMultipartUpload = "AbortMultipartUpload"
+
+// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation.
+func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *aws.Request, output *AbortMultipartUploadOutput) {
+ op := &aws.Operation{
+ Name: opAbortMultipartUpload,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &AbortMultipartUploadInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &AbortMultipartUploadOutput{}
+ req.Data = output
+ return
+}
+
+// Aborts a multipart upload.
+//
+// To verify that all parts have been removed, so you don't get charged for
+// the part storage, you should call the List Parts operation and ensure the
+// parts list is empty.
+func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) {
+ req, out := c.AbortMultipartUploadRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCompleteMultipartUpload = "CompleteMultipartUpload"
+
+// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation.
+func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *aws.Request, output *CompleteMultipartUploadOutput) {
+ op := &aws.Operation{
+ Name: opCompleteMultipartUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &CompleteMultipartUploadInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CompleteMultipartUploadOutput{}
+ req.Data = output
+ return
+}
+
+// Completes a multipart upload by assembling previously uploaded parts.
+func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) {
+ req, out := c.CompleteMultipartUploadRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCopyObject = "CopyObject"
+
+// CopyObjectRequest generates a request for the CopyObject operation.
+func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *aws.Request, output *CopyObjectOutput) {
+ op := &aws.Operation{
+ Name: opCopyObject,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &CopyObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CopyObjectOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a copy of an object that is already stored in Amazon S3.
+func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
+ req, out := c.CopyObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateBucket = "CreateBucket"
+
+// CreateBucketRequest generates a request for the CreateBucket operation.
+func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *aws.Request, output *CreateBucketOutput) {
+ op := &aws.Operation{
+ Name: opCreateBucket,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &CreateBucketInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateBucketOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a new bucket.
+func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
+ req, out := c.CreateBucketRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateMultipartUpload = "CreateMultipartUpload"
+
+// CreateMultipartUploadRequest generates a request for the CreateMultipartUpload operation.
+func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *aws.Request, output *CreateMultipartUploadOutput) {
+ op := &aws.Operation{
+ Name: opCreateMultipartUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?uploads",
+ }
+
+ if input == nil {
+ input = &CreateMultipartUploadInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateMultipartUploadOutput{}
+ req.Data = output
+ return
+}
+
+// Initiates a multipart upload and returns an upload ID.
+//
+// Note: After you initiate multipart upload and upload one or more parts,
+// you must either complete or abort multipart upload in order to stop getting
+// charged for storage of the uploaded parts. Only after you either complete
+// or abort multipart upload, Amazon S3 frees up the parts storage and stops
+// charging you for the parts storage.
+func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
+ req, out := c.CreateMultipartUploadRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucket = "DeleteBucket"
+
+// DeleteBucketRequest generates a request for the DeleteBucket operation.
+func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *aws.Request, output *DeleteBucketOutput) {
+ op := &aws.Operation{
+ Name: opDeleteBucket,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &DeleteBucketInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteBucketOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the bucket. All objects (including all object versions and Delete
+// Markers) in the bucket must be deleted before the bucket itself can be deleted.
+func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
+ req, out := c.DeleteBucketRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucketCORS = "DeleteBucketCors"
+
+// DeleteBucketCORSRequest generates a request for the DeleteBucketCORS operation.
+func (c *S3) DeleteBucketCORSRequest(input *DeleteBucketCORSInput) (req *aws.Request, output *DeleteBucketCORSOutput) {
+ op := &aws.Operation{
+ Name: opDeleteBucketCORS,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &DeleteBucketCORSInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteBucketCORSOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the cors configuration information set for the bucket.
+func (c *S3) DeleteBucketCORS(input *DeleteBucketCORSInput) (*DeleteBucketCORSOutput, error) {
+ req, out := c.DeleteBucketCORSRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
+
+// DeleteBucketLifecycleRequest generates a request for the DeleteBucketLifecycle operation.
+func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *aws.Request, output *DeleteBucketLifecycleOutput) {
+ op := &aws.Operation{
+ Name: opDeleteBucketLifecycle,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &DeleteBucketLifecycleInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteBucketLifecycleOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the lifecycle configuration from the bucket.
+func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
+ req, out := c.DeleteBucketLifecycleRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucketPolicy = "DeleteBucketPolicy"
+
+// DeleteBucketPolicyRequest generates a request for the DeleteBucketPolicy operation.
+func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *aws.Request, output *DeleteBucketPolicyOutput) {
+ op := &aws.Operation{
+ Name: opDeleteBucketPolicy,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?policy",
+ }
+
+ if input == nil {
+ input = &DeleteBucketPolicyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteBucketPolicyOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the policy from the bucket.
+func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
+ req, out := c.DeleteBucketPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucketReplication = "DeleteBucketReplication"
+
+// DeleteBucketReplicationRequest generates a request for the DeleteBucketReplication operation.
+func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *aws.Request, output *DeleteBucketReplicationOutput) {
+ op := &aws.Operation{
+ Name: opDeleteBucketReplication,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &DeleteBucketReplicationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteBucketReplicationOutput{}
+ req.Data = output
+ return
+}
+
+func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
+ req, out := c.DeleteBucketReplicationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucketTagging = "DeleteBucketTagging"
+
+// DeleteBucketTaggingRequest generates a request for the DeleteBucketTagging operation.
+func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *aws.Request, output *DeleteBucketTaggingOutput) {
+ op := &aws.Operation{
+ Name: opDeleteBucketTagging,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?tagging",
+ }
+
+ if input == nil {
+ input = &DeleteBucketTaggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteBucketTaggingOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the tags from the bucket.
+func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) {
+ req, out := c.DeleteBucketTaggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucketWebsite = "DeleteBucketWebsite"
+
+// DeleteBucketWebsiteRequest generates a request for the DeleteBucketWebsite operation.
+func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *aws.Request, output *DeleteBucketWebsiteOutput) {
+ op := &aws.Operation{
+ Name: opDeleteBucketWebsite,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &DeleteBucketWebsiteInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteBucketWebsiteOutput{}
+ req.Data = output
+ return
+}
+
+// This operation removes the website configuration from the bucket.
+func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
+ req, out := c.DeleteBucketWebsiteRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteObject = "DeleteObject"
+
+// DeleteObjectRequest generates a request for the DeleteObject operation.
+func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *aws.Request, output *DeleteObjectOutput) {
+ op := &aws.Operation{
+ Name: opDeleteObject,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &DeleteObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteObjectOutput{}
+ req.Data = output
+ return
+}
+
+// Removes the null version (if there is one) of an object and inserts a delete
+// marker, which becomes the latest version of the object. If there isn't a
+// null version, Amazon S3 does not remove any objects.
+func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteObjects = "DeleteObjects"
+
+// DeleteObjectsRequest generates a request for the DeleteObjects operation.
+func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *aws.Request, output *DeleteObjectsOutput) {
+ op := &aws.Operation{
+ Name: opDeleteObjects,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}?delete",
+ }
+
+ if input == nil {
+ input = &DeleteObjectsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteObjectsOutput{}
+ req.Data = output
+ return
+}
+
+// This operation enables you to delete multiple objects from a bucket using
+// a single HTTP request. You may specify up to 1000 keys.
+func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
+ req, out := c.DeleteObjectsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketACL = "GetBucketAcl"
+
+// GetBucketACLRequest generates a request for the GetBucketACL operation.
+func (c *S3) GetBucketACLRequest(input *GetBucketACLInput) (req *aws.Request, output *GetBucketACLOutput) {
+ op := &aws.Operation{
+ Name: opGetBucketACL,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?acl",
+ }
+
+ if input == nil {
+ input = &GetBucketACLInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketACLOutput{}
+ req.Data = output
+ return
+}
+
+// Gets the access control policy for the bucket.
+func (c *S3) GetBucketACL(input *GetBucketACLInput) (*GetBucketACLOutput, error) {
+ req, out := c.GetBucketACLRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketCORS = "GetBucketCors"
+
+// GetBucketCORSRequest generates a request for the GetBucketCORS operation.
+func (c *S3) GetBucketCORSRequest(input *GetBucketCORSInput) (req *aws.Request, output *GetBucketCORSOutput) {
+ op := &aws.Operation{
+ Name: opGetBucketCORS,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &GetBucketCORSInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketCORSOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the cors configuration for the bucket.
+func (c *S3) GetBucketCORS(input *GetBucketCORSInput) (*GetBucketCORSOutput, error) {
+ req, out := c.GetBucketCORSRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketLifecycle = "GetBucketLifecycle"
+
+// GetBucketLifecycleRequest generates a request for the GetBucketLifecycle operation.
+func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *aws.Request, output *GetBucketLifecycleOutput) {
+ op := &aws.Operation{
+ Name: opGetBucketLifecycle,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &GetBucketLifecycleInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketLifecycleOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the lifecycle configuration information set on the bucket.
+func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) {
+ req, out := c.GetBucketLifecycleRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketLocation = "GetBucketLocation"
+
+// GetBucketLocationRequest generates a request for the GetBucketLocation operation.
+func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *aws.Request, output *GetBucketLocationOutput) {
+ op := &aws.Operation{
+ Name: opGetBucketLocation,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?location",
+ }
+
+ if input == nil {
+ input = &GetBucketLocationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketLocationOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the region the bucket resides in.
+func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) {
+ req, out := c.GetBucketLocationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketLogging = "GetBucketLogging"
+
+// GetBucketLoggingRequest generates a request for the GetBucketLogging operation.
+func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *aws.Request, output *GetBucketLoggingOutput) {
+ op := &aws.Operation{
+ Name: opGetBucketLogging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?logging",
+ }
+
+ if input == nil {
+ input = &GetBucketLoggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketLoggingOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the logging status of a bucket and the permissions users have to
+// view and modify that status. To use GET, you must be the bucket owner.
+func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) {
+ req, out := c.GetBucketLoggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketNotification = "GetBucketNotification"
+
+// GetBucketNotificationRequest generates a request for the GetBucketNotification operation.
+func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *aws.Request, output *NotificationConfigurationDeprecated) {
+ op := &aws.Operation{
+ Name: opGetBucketNotification,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &GetBucketNotificationConfigurationRequest{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &NotificationConfigurationDeprecated{}
+ req.Data = output
+ return
+}
+
+// Deprecated, see the GetBucketNotificationConfiguration operation.
+func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) {
+ req, out := c.GetBucketNotificationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration"
+
+// GetBucketNotificationConfigurationRequest generates a request for the GetBucketNotificationConfiguration operation.
+func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *aws.Request, output *NotificationConfiguration) {
+ op := &aws.Operation{
+ Name: opGetBucketNotificationConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &GetBucketNotificationConfigurationRequest{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &NotificationConfiguration{}
+ req.Data = output
+ return
+}
+
+// Returns the notification configuration of a bucket.
+func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) {
+ req, out := c.GetBucketNotificationConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketPolicy = "GetBucketPolicy"
+
+// GetBucketPolicyRequest generates a request for the GetBucketPolicy operation.
+func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *aws.Request, output *GetBucketPolicyOutput) {
+ op := &aws.Operation{
+ Name: opGetBucketPolicy,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?policy",
+ }
+
+ if input == nil {
+ input = &GetBucketPolicyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketPolicyOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the policy of a specified bucket.
+func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) {
+ req, out := c.GetBucketPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketReplication = "GetBucketReplication"
+
+// GetBucketReplicationRequest generates a request for the GetBucketReplication operation.
+func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *aws.Request, output *GetBucketReplicationOutput) {
+ op := &aws.Operation{
+ Name: opGetBucketReplication,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &GetBucketReplicationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketReplicationOutput{}
+ req.Data = output
+ return
+}
+
+func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketRequestPayment = "GetBucketRequestPayment"
+
+// GetBucketRequestPaymentRequest generates a request for the GetBucketRequestPayment operation.
+func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *aws.Request, output *GetBucketRequestPaymentOutput) {
+ op := &aws.Operation{
+ Name: opGetBucketRequestPayment,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?requestPayment",
+ }
+
+ if input == nil {
+ input = &GetBucketRequestPaymentInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketRequestPaymentOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the request payment configuration of a bucket.
+func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) {
+ req, out := c.GetBucketRequestPaymentRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketTagging = "GetBucketTagging"
+
+// GetBucketTaggingRequest generates a request for the GetBucketTagging operation.
+func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *aws.Request, output *GetBucketTaggingOutput) {
+ op := &aws.Operation{
+ Name: opGetBucketTagging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?tagging",
+ }
+
+ if input == nil {
+ input = &GetBucketTaggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketTaggingOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the tag set associated with the bucket.
+func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) {
+ req, out := c.GetBucketTaggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketVersioning = "GetBucketVersioning"
+
+// GetBucketVersioningRequest generates a request for the GetBucketVersioning operation.
+func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *aws.Request, output *GetBucketVersioningOutput) {
+ op := &aws.Operation{
+ Name: opGetBucketVersioning,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?versioning",
+ }
+
+ if input == nil {
+ input = &GetBucketVersioningInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketVersioningOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the versioning state of a bucket.
+func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
+ req, out := c.GetBucketVersioningRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketWebsite = "GetBucketWebsite"
+
+// GetBucketWebsiteRequest generates a request for the GetBucketWebsite operation.
+func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *aws.Request, output *GetBucketWebsiteOutput) {
+ op := &aws.Operation{
+ Name: opGetBucketWebsite,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &GetBucketWebsiteInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketWebsiteOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the website configuration for a bucket.
+func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) {
+ req, out := c.GetBucketWebsiteRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetObject = "GetObject"
+
+// GetObjectRequest generates a request for the GetObject operation.
+func (c *S3) GetObjectRequest(input *GetObjectInput) (req *aws.Request, output *GetObjectOutput) {
+ op := &aws.Operation{
+ Name: opGetObject,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &GetObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetObjectOutput{}
+ req.Data = output
+ return
+}
+
+// Retrieves objects from Amazon S3.
+func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
+ req, out := c.GetObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetObjectACL = "GetObjectAcl"
+
+// GetObjectACLRequest generates a request for the GetObjectACL operation.
+func (c *S3) GetObjectACLRequest(input *GetObjectACLInput) (req *aws.Request, output *GetObjectACLOutput) {
+ op := &aws.Operation{
+ Name: opGetObjectACL,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &GetObjectACLInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetObjectACLOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the access control list (ACL) of an object.
+func (c *S3) GetObjectACL(input *GetObjectACLInput) (*GetObjectACLOutput, error) {
+ req, out := c.GetObjectACLRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetObjectTorrent = "GetObjectTorrent"
+
+// GetObjectTorrentRequest generates a request for the GetObjectTorrent operation.
+func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *aws.Request, output *GetObjectTorrentOutput) {
+ op := &aws.Operation{
+ Name: opGetObjectTorrent,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?torrent",
+ }
+
+ if input == nil {
+ input = &GetObjectTorrentInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetObjectTorrentOutput{}
+ req.Data = output
+ return
+}
+
+// Return torrent files from a bucket.
+func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) {
+ req, out := c.GetObjectTorrentRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opHeadBucket = "HeadBucket"
+
+// HeadBucketRequest generates a request for the HeadBucket operation.
+func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *aws.Request, output *HeadBucketOutput) {
+ op := &aws.Operation{
+ Name: opHeadBucket,
+ HTTPMethod: "HEAD",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &HeadBucketInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &HeadBucketOutput{}
+ req.Data = output
+ return
+}
+
+// This operation is useful to determine if a bucket exists and you have permission
+// to access it.
+func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) {
+ req, out := c.HeadBucketRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opHeadObject = "HeadObject"
+
+// HeadObjectRequest generates a request for the HeadObject operation.
+func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *aws.Request, output *HeadObjectOutput) {
+ op := &aws.Operation{
+ Name: opHeadObject,
+ HTTPMethod: "HEAD",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &HeadObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &HeadObjectOutput{}
+ req.Data = output
+ return
+}
+
+// The HEAD operation retrieves metadata from an object without returning the
+// object itself. This operation is useful if you're only interested in an object's
+// metadata. To use HEAD, you must have READ access to the object.
+func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
+ req, out := c.HeadObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListBuckets = "ListBuckets"
+
+// ListBucketsRequest generates a request for the ListBuckets operation.
+func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *aws.Request, output *ListBucketsOutput) {
+ op := &aws.Operation{
+ Name: opListBuckets,
+ HTTPMethod: "GET",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListBucketsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListBucketsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a list of all buckets owned by the authenticated sender of the request.
+func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
+ req, out := c.ListBucketsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListMultipartUploads = "ListMultipartUploads"
+
+// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation.
+func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *aws.Request, output *ListMultipartUploadsOutput) {
+ op := &aws.Operation{
+ Name: opListMultipartUploads,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?uploads",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"KeyMarker", "UploadIdMarker"},
+ OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"},
+ LimitToken: "MaxUploads",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListMultipartUploadsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListMultipartUploadsOutput{}
+ req.Data = output
+ return
+}
+
+// This operation lists in-progress multipart uploads.
+func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
+ req, out := c.ListMultipartUploadsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListMultipartUploadsRequest(input)
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListMultipartUploadsOutput), lastPage)
+ })
+}
+
+const opListObjectVersions = "ListObjectVersions"
+
+// ListObjectVersionsRequest generates a request for the ListObjectVersions operation.
+func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *aws.Request, output *ListObjectVersionsOutput) {
+ op := &aws.Operation{
+ Name: opListObjectVersions,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?versions",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"KeyMarker", "VersionIdMarker"},
+ OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectVersionsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListObjectVersionsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns metadata about all of the versions of objects in a bucket.
+func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
+ req, out := c.ListObjectVersionsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListObjectVersionsRequest(input)
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListObjectVersionsOutput), lastPage)
+ })
+}
+
+const opListObjects = "ListObjects"
+
+// ListObjectsRequest generates a request for the ListObjects operation.
+func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *aws.Request, output *ListObjectsOutput) {
+ op := &aws.Operation{
+ Name: opListObjects,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"NextMarker || Contents[-1].Key"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListObjectsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns some or all (up to 1000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket.
+func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
+ req, out := c.ListObjectsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListObjectsRequest(input)
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListObjectsOutput), lastPage)
+ })
+}
+
+const opListParts = "ListParts"
+
+// ListPartsRequest generates a request for the ListParts operation.
+func (c *S3) ListPartsRequest(input *ListPartsInput) (req *aws.Request, output *ListPartsOutput) {
+ op := &aws.Operation{
+ Name: opListParts,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"PartNumberMarker"},
+ OutputTokens: []string{"NextPartNumberMarker"},
+ LimitToken: "MaxParts",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListPartsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListPartsOutput{}
+ req.Data = output
+ return
+}
+
+// Lists the parts that have been uploaded for a specific multipart upload.
+func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
+ req, out := c.ListPartsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListPartsRequest(input)
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListPartsOutput), lastPage)
+ })
+}
+
+const opPutBucketACL = "PutBucketAcl"
+
+// PutBucketACLRequest generates a request for the PutBucketACL operation.
+func (c *S3) PutBucketACLRequest(input *PutBucketACLInput) (req *aws.Request, output *PutBucketACLOutput) {
+ op := &aws.Operation{
+ Name: opPutBucketACL,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?acl",
+ }
+
+ if input == nil {
+ input = &PutBucketACLInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutBucketACLOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the permissions on a bucket using access control lists (ACL).
+func (c *S3) PutBucketACL(input *PutBucketACLInput) (*PutBucketACLOutput, error) {
+ req, out := c.PutBucketACLRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketCORS = "PutBucketCors"
+
+// PutBucketCORSRequest generates a request for the PutBucketCORS operation.
+func (c *S3) PutBucketCORSRequest(input *PutBucketCORSInput) (req *aws.Request, output *PutBucketCORSOutput) {
+ op := &aws.Operation{
+ Name: opPutBucketCORS,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &PutBucketCORSInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutBucketCORSOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the cors configuration for a bucket.
+func (c *S3) PutBucketCORS(input *PutBucketCORSInput) (*PutBucketCORSOutput, error) {
+ req, out := c.PutBucketCORSRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketLifecycle = "PutBucketLifecycle"
+
+// PutBucketLifecycleRequest generates a request for the PutBucketLifecycle operation.
+func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *aws.Request, output *PutBucketLifecycleOutput) {
+ op := &aws.Operation{
+ Name: opPutBucketLifecycle,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &PutBucketLifecycleInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutBucketLifecycleOutput{}
+ req.Data = output
+ return
+}
+
+// Sets lifecycle configuration for your bucket. If a lifecycle configuration
+// exists, it replaces it.
+func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
+ req, out := c.PutBucketLifecycleRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketLogging = "PutBucketLogging"
+
+// PutBucketLoggingRequest generates a request for the PutBucketLogging operation.
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *aws.Request, output *PutBucketLoggingOutput) {
+ op := &aws.Operation{
+ Name: opPutBucketLogging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?logging",
+ }
+
+ if input == nil {
+ input = &PutBucketLoggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutBucketLoggingOutput{}
+ req.Data = output
+ return
+}
+
+// Set the logging parameters for a bucket and to specify permissions for who
+// can view and modify the logging parameters. To set the logging status of
+// a bucket, you must be the bucket owner.
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+ req, out := c.PutBucketLoggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketNotification = "PutBucketNotification"
+
+// PutBucketNotificationRequest generates a request for the PutBucketNotification operation.
+func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *aws.Request, output *PutBucketNotificationOutput) {
+ op := &aws.Operation{
+ Name: opPutBucketNotification,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &PutBucketNotificationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutBucketNotificationOutput{}
+ req.Data = output
+ return
+}
+
+// Deprecated, see the PutBucketNotificationConfiguraiton operation.
+func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
+ req, out := c.PutBucketNotificationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration"
+
+// PutBucketNotificationConfigurationRequest generates a request for the PutBucketNotificationConfiguration operation.
+func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *aws.Request, output *PutBucketNotificationConfigurationOutput) {
+ op := &aws.Operation{
+ Name: opPutBucketNotificationConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &PutBucketNotificationConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutBucketNotificationConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// Enables notifications of specified events for a bucket.
+func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
+ req, out := c.PutBucketNotificationConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketPolicy = "PutBucketPolicy"
+
+// PutBucketPolicyRequest generates a request for the PutBucketPolicy operation.
+func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *aws.Request, output *PutBucketPolicyOutput) {
+ op := &aws.Operation{
+ Name: opPutBucketPolicy,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?policy",
+ }
+
+ if input == nil {
+ input = &PutBucketPolicyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutBucketPolicyOutput{}
+ req.Data = output
+ return
+}
+
+// Replaces a policy on a bucket. If the bucket already has a policy, the one
+// in this request completely replaces it.
+func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
+ req, out := c.PutBucketPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketReplication = "PutBucketReplication"
+
+// PutBucketReplicationRequest generates a request for the PutBucketReplication operation.
+func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *aws.Request, output *PutBucketReplicationOutput) {
+ op := &aws.Operation{
+ Name: opPutBucketReplication,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &PutBucketReplicationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutBucketReplicationOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a new replication configuration (or replaces an existing one, if
+// present).
+func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
+ req, out := c.PutBucketReplicationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketRequestPayment = "PutBucketRequestPayment"
+
+// PutBucketRequestPaymentRequest generates a request for the PutBucketRequestPayment operation.
+func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *aws.Request, output *PutBucketRequestPaymentOutput) {
+ op := &aws.Operation{
+ Name: opPutBucketRequestPayment,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?requestPayment",
+ }
+
+ if input == nil {
+ input = &PutBucketRequestPaymentInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutBucketRequestPaymentOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the request payment configuration for a bucket. By default, the bucket
+// owner pays for downloads from the bucket. This configuration parameter enables
+// the bucket owner (only) to specify that the person requesting the download
+// will be charged for the download. Documentation on requester pays buckets
+// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
+func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) {
+ req, out := c.PutBucketRequestPaymentRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketTagging = "PutBucketTagging"
+
+// PutBucketTaggingRequest generates a request for the PutBucketTagging operation.
+func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *aws.Request, output *PutBucketTaggingOutput) {
+ op := &aws.Operation{
+ Name: opPutBucketTagging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?tagging",
+ }
+
+ if input == nil {
+ input = &PutBucketTaggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutBucketTaggingOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the tags for a bucket.
+func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
+ req, out := c.PutBucketTaggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketVersioning = "PutBucketVersioning"
+
+// PutBucketVersioningRequest generates a request for the PutBucketVersioning operation.
+func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *aws.Request, output *PutBucketVersioningOutput) {
+ op := &aws.Operation{
+ Name: opPutBucketVersioning,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?versioning",
+ }
+
+ if input == nil {
+ input = &PutBucketVersioningInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutBucketVersioningOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the versioning state of an existing bucket. To set the versioning state,
+// you must be the bucket owner.
+func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
+ req, out := c.PutBucketVersioningRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketWebsite = "PutBucketWebsite"
+
+// PutBucketWebsiteRequest generates a request for the PutBucketWebsite operation.
+func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *aws.Request, output *PutBucketWebsiteOutput) {
+ op := &aws.Operation{
+ Name: opPutBucketWebsite,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &PutBucketWebsiteInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutBucketWebsiteOutput{}
+ req.Data = output
+ return
+}
+
+// Set the website configuration for a bucket.
+func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
+ req, out := c.PutBucketWebsiteRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutObject = "PutObject"
+
+// PutObjectRequest generates a request for the PutObject operation.
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *aws.Request, output *PutObjectOutput) {
+ op := &aws.Operation{
+ Name: opPutObject,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &PutObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutObjectOutput{}
+ req.Data = output
+ return
+}
+
+// Adds an object to a bucket.
+func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
+ req, out := c.PutObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutObjectACL = "PutObjectAcl"
+
+// PutObjectACLRequest generates a request for the PutObjectACL operation.
+func (c *S3) PutObjectACLRequest(input *PutObjectACLInput) (req *aws.Request, output *PutObjectACLOutput) {
+ op := &aws.Operation{
+ Name: opPutObjectACL,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &PutObjectACLInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutObjectACLOutput{}
+ req.Data = output
+ return
+}
+
+// uses the acl subresource to set the access control list (ACL) permissions
+// for an object that already exists in a bucket
+func (c *S3) PutObjectACL(input *PutObjectACLInput) (*PutObjectACLOutput, error) {
+ req, out := c.PutObjectACLRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRestoreObject = "RestoreObject"
+
+// RestoreObjectRequest generates a request for the RestoreObject operation.
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *aws.Request, output *RestoreObjectOutput) {
+ op := &aws.Operation{
+ Name: opRestoreObject,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?restore",
+ }
+
+ if input == nil {
+ input = &RestoreObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RestoreObjectOutput{}
+ req.Data = output
+ return
+}
+
+// Restores an archived copy of an object back into Amazon S3
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+ req, out := c.RestoreObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates a request for the UploadPart operation.
+func (c *S3) UploadPartRequest(input *UploadPartInput) (req *aws.Request, output *UploadPartOutput) {
+ op := &aws.Operation{
+ Name: opUploadPart,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UploadPartOutput{}
+ req.Data = output
+ return
+}
+
+// Uploads a part in a multipart upload.
+//
+// Note: After you initiate multipart upload and upload one or more parts,
+// you must either complete or abort multipart upload in order to stop getting
+// charged for storage of the uploaded parts. Only after you either complete
+// or abort multipart upload, Amazon S3 frees up the parts storage and stops
+// charging you for the parts storage.
+func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
+ req, out := c.UploadPartRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUploadPartCopy = "UploadPartCopy"
+
+// UploadPartCopyRequest generates a request for the UploadPartCopy operation.
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *aws.Request, output *UploadPartCopyOutput) {
+ op := &aws.Operation{
+ Name: opUploadPartCopy,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartCopyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UploadPartCopyOutput{}
+ req.Data = output
+ return
+}
+
+// Uploads a part by copying data from an existing object as data source.
+func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
+ req, out := c.UploadPartCopyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+type AbortMultipartUploadInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+
+ metadataAbortMultipartUploadInput `json:"-" xml:"-"`
+}
+
+type metadataAbortMultipartUploadInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type AbortMultipartUploadOutput struct {
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ metadataAbortMultipartUploadOutput `json:"-" xml:"-"`
+}
+
+type metadataAbortMultipartUploadOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type AccessControlPolicy struct {
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ Owner *Owner `type:"structure"`
+
+ metadataAccessControlPolicy `json:"-" xml:"-"`
+}
+
+type metadataAccessControlPolicy struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type Bucket struct {
+ // Date the bucket was created.
+ CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The name of the bucket.
+ Name *string `type:"string"`
+
+ metadataBucket `json:"-" xml:"-"`
+}
+
+type metadataBucket struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type BucketLoggingStatus struct {
+ LoggingEnabled *LoggingEnabled `type:"structure"`
+
+ metadataBucketLoggingStatus `json:"-" xml:"-"`
+}
+
+type metadataBucketLoggingStatus struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type CORSConfiguration struct {
+ CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"`
+
+ metadataCORSConfiguration `json:"-" xml:"-"`
+}
+
+type metadataCORSConfiguration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type CORSRule struct {
+ // Specifies which headers are allowed in a pre-flight OPTIONS request.
+ AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"`
+
+ // Identifies HTTP methods that the domain/origin specified in the rule is allowed
+ // to execute.
+ AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true"`
+
+ // One or more origins you want customers to be able to access the bucket from.
+ AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true"`
+
+ // One or more headers in the response that you want customers to be able to
+ // access from their applications (for example, from a JavaScript XMLHttpRequest
+ // object).
+ ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"`
+
+ // The time in seconds that your browser is to cache the preflight response
+ // for the specified resource.
+ MaxAgeSeconds *int64 `type:"integer"`
+
+ metadataCORSRule `json:"-" xml:"-"`
+}
+
+type metadataCORSRule struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type CloudFunctionConfiguration struct {
+ CloudFunction *string `type:"string"`
+
+ // Bucket event for which to send notifications.
+ Event *string `type:"string"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ ID *string `locationName:"Id" type:"string"`
+
+ InvocationRole *string `type:"string"`
+
+ metadataCloudFunctionConfiguration `json:"-" xml:"-"`
+}
+
+type metadataCloudFunctionConfiguration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type CommonPrefix struct {
+ Prefix *string `type:"string"`
+
+ metadataCommonPrefix `json:"-" xml:"-"`
+}
+
+type metadataCommonPrefix struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type CompleteMultipartUploadInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+
+ metadataCompleteMultipartUploadInput `json:"-" xml:"-"`
+}
+
+type metadataCompleteMultipartUploadInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"MultipartUpload"`
+}
+
+type CompleteMultipartUploadOutput struct {
+ Bucket *string `type:"string"`
+
+ // Entity tag of the object.
+ ETag *string `type:"string"`
+
+ // If the object expiration is configured, this will contain the expiration
+ // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ Key *string `type:"string"`
+
+ Location *string `type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+ // Version of the object.
+ VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ metadataCompleteMultipartUploadOutput `json:"-" xml:"-"`
+}
+
+type metadataCompleteMultipartUploadOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type CompletedMultipartUpload struct {
+ Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
+
+ metadataCompletedMultipartUpload `json:"-" xml:"-"`
+}
+
+type metadataCompletedMultipartUpload struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type CompletedPart struct {
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Part number that identifies the part.
+ PartNumber *int64 `type:"integer"`
+
+ metadataCompletedPart `json:"-" xml:"-"`
+}
+
+type metadataCompletedPart struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type Condition struct {
+ // The HTTP error code when the redirect is applied. In the event of an error,
+ // if the error code equals this value, then the specified redirect is applied.
+ // Required when parent element Condition is specified and sibling KeyPrefixEquals
+ // is not specified. If both are specified, then both must be true for the redirect
+ // to be applied.
+ HTTPErrorCodeReturnedEquals *string `locationName:"HttpErrorCodeReturnedEquals" type:"string"`
+
+ // The object key name prefix when the redirect is applied. For example, to
+ // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+ // To redirect request for all pages with the prefix docs/, the key prefix will
+ // be /docs, which identifies all objects in the docs/ folder. Required when
+ // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+ // is not specified. If both conditions are specified, both must be true for
+ // the redirect to be applied.
+ KeyPrefixEquals *string `type:"string"`
+
+ metadataCondition `json:"-" xml:"-"`
+}
+
+type metadataCondition struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type CopyObjectInput struct {
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The name of the source bucket and key name of the source object, separated
+ // by a slash (/). Must be URL-encoded.
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether the metadata is copied from the source object or replaced
+ // with metadata provided in the request.
+ MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ // Specifies the algorithm to use to when encrypting the object (e.g., AES256,
+ // aws:kms).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+
+ metadataCopyObjectInput `json:"-" xml:"-"`
+}
+
+type metadataCopyObjectInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// CopyObjectOutput holds the response fields for the S3 CopyObject operation.
+type CopyObjectOutput struct {
+ // The XML payload of the response; see CopyObjectResult.
+ CopyObjectResult *CopyObjectResult `type:"structure"`
+
+ // Version ID of the source object that was copied, taken from the
+ // x-amz-copy-source-version-id response header.
+ CopySourceVersionID *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+ // If the object expiration is configured, the response includes this header.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+ metadataCopyObjectOutput `json:"-" xml:"-"`
+}
+
+// metadataCopyObjectOutput holds the SDK shape traits for CopyObjectOutput;
+// CopyObjectResult is marked as the payload (XML body) of the response.
+type metadataCopyObjectOutput struct {
+ SDKShapeTraits bool `type:"structure" payload:"CopyObjectResult"`
+}
+
+// CopyObjectResult is the XML payload returned in a CopyObject response.
+type CopyObjectResult struct {
+ // Entity tag of the object.
+ ETag *string `type:"string"`
+
+ // Date and time at which the object was last modified.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ metadataCopyObjectResult `json:"-" xml:"-"`
+}
+
+type metadataCopyObjectResult struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// CopyPartResult describes a copied part of a multipart upload (its entity
+// tag and upload time).
+type CopyPartResult struct {
+ // Entity tag of the object.
+ ETag *string `type:"string"`
+
+ // Date and time at which the object was uploaded.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ metadataCopyPartResult `json:"-" xml:"-"`
+}
+
+type metadataCopyPartResult struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// CreateBucketConfiguration is the optional XML body of a CreateBucket
+// request, carrying the bucket's location constraint.
+type CreateBucketConfiguration struct {
+ // Specifies the region where the bucket will be created. If you don't specify
+ // a region, the bucket will be created in US Standard.
+ LocationConstraint *string `type:"string"`
+
+ metadataCreateBucketConfiguration `json:"-" xml:"-"`
+}
+
+type metadataCreateBucketConfiguration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// CreateBucketInput holds the parameters for the S3 CreateBucket operation.
+type CreateBucketInput struct {
+ // The canned ACL to apply to the bucket.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
+
+ // Name of the bucket to create (URI path element). Required.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Optional request body; see CreateBucketConfiguration.
+ CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ metadataCreateBucketInput `json:"-" xml:"-"`
+}
+
+// metadataCreateBucketInput marks CreateBucketConfiguration as the request
+// payload (XML body).
+type metadataCreateBucketInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"CreateBucketConfiguration"`
+}
+
+// CreateBucketOutput holds the response fields for the S3 CreateBucket
+// operation.
+type CreateBucketOutput struct {
+ // Location of the created bucket, from the Location response header.
+ Location *string `location:"header" locationName:"Location" type:"string"`
+
+ metadataCreateBucketOutput `json:"-" xml:"-"`
+}
+
+type metadataCreateBucketOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// CreateMultipartUploadInput holds the parameters for initiating a multipart
+// upload (the S3 CreateMultipartUpload operation).
+type CreateMultipartUploadInput struct {
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
+
+ // Name of the bucket to upload into (URI path element). Required.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Object key for the upload (URI path element). Required.
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ // Specifies the algorithm to use to when encrypting the object (e.g., AES256,
+ // aws:kms).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+
+ metadataCreateMultipartUploadInput `json:"-" xml:"-"`
+}
+
+type metadataCreateMultipartUploadInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// CreateMultipartUploadOutput holds the response fields for the S3
+// CreateMultipartUpload operation, including the UploadID used by subsequent
+// part uploads.
+type CreateMultipartUploadOutput struct {
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `locationName:"Bucket" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+ // ID for the initiated multipart upload.
+ UploadID *string `locationName:"UploadId" type:"string"`
+
+ metadataCreateMultipartUploadOutput `json:"-" xml:"-"`
+}
+
+type metadataCreateMultipartUploadOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Delete is the XML body of a DeleteObjects (multi-object delete) request:
+// the list of object identifiers to delete plus the quiet-mode flag.
+type Delete struct {
+ // Identifiers of the objects to delete. Required.
+ Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
+
+ // Element to enable quiet mode for the request. When you add this element,
+ // you must set its value to true.
+ Quiet *bool `type:"boolean"`
+
+ metadataDelete `json:"-" xml:"-"`
+}
+
+type metadataDelete struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketCORSInput holds the parameters for deleting a bucket's CORS
+// configuration; only the bucket name is required.
+type DeleteBucketCORSInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataDeleteBucketCORSInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketCORSInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketCORSOutput is the (empty) response of DeleteBucketCORS.
+type DeleteBucketCORSOutput struct {
+ metadataDeleteBucketCORSOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketCORSOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketInput holds the parameters for deleting a bucket.
+type DeleteBucketInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataDeleteBucketInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketLifecycleInput holds the parameters for deleting a bucket's
+// lifecycle configuration.
+type DeleteBucketLifecycleInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataDeleteBucketLifecycleInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketLifecycleInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketLifecycleOutput is the (empty) response of DeleteBucketLifecycle.
+type DeleteBucketLifecycleOutput struct {
+ metadataDeleteBucketLifecycleOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketLifecycleOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketOutput is the (empty) response of DeleteBucket.
+type DeleteBucketOutput struct {
+ metadataDeleteBucketOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketPolicyInput holds the parameters for deleting a bucket policy.
+type DeleteBucketPolicyInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataDeleteBucketPolicyInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketPolicyInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketPolicyOutput is the (empty) response of DeleteBucketPolicy.
+type DeleteBucketPolicyOutput struct {
+ metadataDeleteBucketPolicyOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketPolicyOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketReplicationInput holds the parameters for deleting a bucket's
+// replication configuration.
+type DeleteBucketReplicationInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataDeleteBucketReplicationInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketReplicationInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketReplicationOutput is the (empty) response of
+// DeleteBucketReplication.
+type DeleteBucketReplicationOutput struct {
+ metadataDeleteBucketReplicationOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketReplicationOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketTaggingInput holds the parameters for deleting a bucket's
+// tag set.
+type DeleteBucketTaggingInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataDeleteBucketTaggingInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketTaggingInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketTaggingOutput is the (empty) response of DeleteBucketTagging.
+type DeleteBucketTaggingOutput struct {
+ metadataDeleteBucketTaggingOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketTaggingOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketWebsiteInput holds the parameters for deleting a bucket's
+// website configuration.
+type DeleteBucketWebsiteInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataDeleteBucketWebsiteInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketWebsiteInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteBucketWebsiteOutput is the (empty) response of DeleteBucketWebsite.
+type DeleteBucketWebsiteOutput struct {
+ metadataDeleteBucketWebsiteOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketWebsiteOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteMarkerEntry describes a delete-marker version of an object, as
+// returned in object-version listings.
+type DeleteMarkerEntry struct {
+ // Specifies whether the object is (true) or is not (false) the latest version
+ // of an object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `type:"string"`
+
+ // Date and time the object was last modified.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Owner of the delete marker.
+ Owner *Owner `type:"structure"`
+
+ // Version ID of an object.
+ VersionID *string `locationName:"VersionId" type:"string"`
+
+ metadataDeleteMarkerEntry `json:"-" xml:"-"`
+}
+
+type metadataDeleteMarkerEntry struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteObjectInput holds the parameters for the S3 DeleteObject operation.
+type DeleteObjectInput struct {
+ // Name of the bucket containing the object (URI path element). Required.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key of the object to delete (URI path element). Required.
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionID *string `location:"querystring" locationName:"versionId" type:"string"`
+
+ metadataDeleteObjectInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteObjectInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteObjectOutput holds the response fields for the S3 DeleteObject
+// operation.
+type DeleteObjectOutput struct {
+ // Specifies whether the versioned object that was permanently deleted was (true)
+ // or was not (false) a delete marker.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ // Returns the version ID of the delete marker created as a result of the DELETE
+ // operation.
+ VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ metadataDeleteObjectOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteObjectOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeleteObjectsInput holds the parameters for the S3 DeleteObjects
+// (multi-object delete) operation; Delete is the XML request body.
+type DeleteObjectsInput struct {
+ // Name of the bucket containing the objects (URI path element). Required.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // XML body listing the objects to delete; see Delete. Required.
+ Delete *Delete `locationName:"Delete" type:"structure" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ metadataDeleteObjectsInput `json:"-" xml:"-"`
+}
+
+// metadataDeleteObjectsInput marks Delete as the request payload (XML body).
+type metadataDeleteObjectsInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"Delete"`
+}
+
+// DeleteObjectsOutput holds the response of the S3 DeleteObjects operation:
+// the successfully deleted objects and any per-object errors.
+type DeleteObjectsOutput struct {
+ // Objects that were successfully deleted.
+ Deleted []*DeletedObject `type:"list" flattened:"true"`
+
+ // Per-object errors for objects that could not be deleted.
+ Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ metadataDeleteObjectsOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteObjectsOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// DeletedObject describes one object successfully removed by DeleteObjects.
+type DeletedObject struct {
+ DeleteMarker *bool `type:"boolean"`
+
+ DeleteMarkerVersionID *string `locationName:"DeleteMarkerVersionId" type:"string"`
+
+ Key *string `type:"string"`
+
+ VersionID *string `locationName:"VersionId" type:"string"`
+
+ metadataDeletedObject `json:"-" xml:"-"`
+}
+
+type metadataDeletedObject struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Destination identifies where Amazon S3 stores replicas for a replication
+// rule.
+type Destination struct {
+ // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store
+ // replicas of the object identified by the rule.
+ Bucket *string `type:"string" required:"true"`
+
+ metadataDestination `json:"-" xml:"-"`
+}
+
+type metadataDestination struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Error describes a per-object failure reported in a DeleteObjects response.
+type Error struct {
+ // S3 error code for the failure.
+ Code *string `type:"string"`
+
+ // Key of the object the error applies to.
+ Key *string `type:"string"`
+
+ // Human-readable description of the error.
+ Message *string `type:"string"`
+
+ VersionID *string `locationName:"VersionId" type:"string"`
+
+ metadataError `json:"-" xml:"-"`
+}
+
+type metadataError struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// ErrorDocument names the object served by a website-configured bucket when
+// a 4XX class error occurs.
+type ErrorDocument struct {
+ // The object key name to use when a 4XX class error occurs.
+ Key *string `type:"string" required:"true"`
+
+ metadataErrorDocument `json:"-" xml:"-"`
+}
+
+type metadataErrorDocument struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketACLInput holds the parameters for retrieving a bucket's ACL.
+type GetBucketACLInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataGetBucketACLInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketACLInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketACLOutput holds the bucket's access control list: its grants and
+// owner.
+type GetBucketACLOutput struct {
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ // Owner of the bucket.
+ Owner *Owner `type:"structure"`
+
+ metadataGetBucketACLOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketACLOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketCORSInput holds the parameters for retrieving a bucket's CORS
+// configuration.
+type GetBucketCORSInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataGetBucketCORSInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketCORSInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketCORSOutput holds the bucket's CORS rules.
+type GetBucketCORSOutput struct {
+ CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"`
+
+ metadataGetBucketCORSOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketCORSOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketLifecycleInput holds the parameters for retrieving a bucket's
+// lifecycle configuration.
+type GetBucketLifecycleInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataGetBucketLifecycleInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketLifecycleInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketLifecycleOutput holds the bucket's lifecycle rules.
+type GetBucketLifecycleOutput struct {
+ Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"`
+
+ metadataGetBucketLifecycleOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketLifecycleOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketLocationInput holds the parameters for retrieving a bucket's
+// region (location constraint).
+type GetBucketLocationInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataGetBucketLocationInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketLocationInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketLocationOutput holds the bucket's location constraint.
+type GetBucketLocationOutput struct {
+ LocationConstraint *string `type:"string"`
+
+ metadataGetBucketLocationOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketLocationOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketLoggingInput holds the parameters for retrieving a bucket's
+// logging status.
+type GetBucketLoggingInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataGetBucketLoggingInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketLoggingInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketLoggingOutput holds the bucket's logging configuration, if any.
+type GetBucketLoggingOutput struct {
+ LoggingEnabled *LoggingEnabled `type:"structure"`
+
+ metadataGetBucketLoggingOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketLoggingOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketNotificationConfigurationRequest is the input for retrieving a
+// bucket's notification configuration.
+type GetBucketNotificationConfigurationRequest struct {
+ // Name of the bucket to get the notification configuration for.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataGetBucketNotificationConfigurationRequest `json:"-" xml:"-"`
+}
+
+type metadataGetBucketNotificationConfigurationRequest struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketPolicyInput holds the parameters for retrieving a bucket policy.
+type GetBucketPolicyInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataGetBucketPolicyInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketPolicyInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketPolicyOutput carries the bucket policy document.
+type GetBucketPolicyOutput struct {
+ // The bucket policy as a JSON document.
+ Policy *string `type:"string"`
+
+ metadataGetBucketPolicyOutput `json:"-" xml:"-"`
+}
+
+// metadataGetBucketPolicyOutput marks Policy as the response payload (body).
+type metadataGetBucketPolicyOutput struct {
+ SDKShapeTraits bool `type:"structure" payload:"Policy"`
+}
+
+// GetBucketReplicationInput holds the parameters for retrieving a bucket's
+// replication configuration.
+type GetBucketReplicationInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataGetBucketReplicationInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketReplicationInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketReplicationOutput carries the bucket's replication configuration.
+type GetBucketReplicationOutput struct {
+ // Container for replication rules. You can add as many as 1,000 rules. Total
+ // replication configuration size can be up to 2 MB.
+ ReplicationConfiguration *ReplicationConfiguration `type:"structure"`
+
+ metadataGetBucketReplicationOutput `json:"-" xml:"-"`
+}
+
+// metadataGetBucketReplicationOutput marks ReplicationConfiguration as the
+// response payload (XML body).
+type metadataGetBucketReplicationOutput struct {
+ SDKShapeTraits bool `type:"structure" payload:"ReplicationConfiguration"`
+}
+
+// GetBucketRequestPaymentInput holds the parameters for retrieving a bucket's
+// request-payment configuration.
+type GetBucketRequestPaymentInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataGetBucketRequestPaymentInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketRequestPaymentInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketRequestPaymentOutput reports who pays for requests to the bucket.
+type GetBucketRequestPaymentOutput struct {
+ // Specifies who pays for the download and request fees.
+ Payer *string `type:"string"`
+
+ metadataGetBucketRequestPaymentOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketRequestPaymentOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketTaggingInput holds the parameters for retrieving a bucket's tag
+// set.
+type GetBucketTaggingInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataGetBucketTaggingInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketTaggingInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketTaggingOutput holds the bucket's tag set.
+type GetBucketTaggingOutput struct {
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+
+ metadataGetBucketTaggingOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketTaggingOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketVersioningInput holds the parameters for retrieving a bucket's
+// versioning state.
+type GetBucketVersioningInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataGetBucketVersioningInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketVersioningInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketVersioningOutput reports the bucket's versioning and MFA-delete
+// state.
+type GetBucketVersioningOutput struct {
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA
+ // delete. If the bucket has never been so configured, this element is not returned.
+ MFADelete *string `locationName:"MfaDelete" type:"string"`
+
+ // The versioning state of the bucket.
+ Status *string `type:"string"`
+
+ metadataGetBucketVersioningOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketVersioningOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketWebsiteInput holds the parameters for retrieving a bucket's
+// website configuration.
+type GetBucketWebsiteInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataGetBucketWebsiteInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketWebsiteInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetBucketWebsiteOutput holds the bucket's website configuration: index and
+// error documents, redirect target, and routing rules.
+type GetBucketWebsiteOutput struct {
+ ErrorDocument *ErrorDocument `type:"structure"`
+
+ IndexDocument *IndexDocument `type:"structure"`
+
+ RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+
+ metadataGetBucketWebsiteOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketWebsiteOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetObjectACLInput holds the parameters for retrieving an object's ACL.
+type GetObjectACLInput struct {
+ // Name of the bucket containing the object (URI path element). Required.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key of the object whose ACL is requested (URI path element). Required.
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionID *string `location:"querystring" locationName:"versionId" type:"string"`
+
+ metadataGetObjectACLInput `json:"-" xml:"-"`
+}
+
+type metadataGetObjectACLInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetObjectACLOutput holds the object's access control list: its grants and
+// owner.
+type GetObjectACLOutput struct {
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ // Owner of the object.
+ Owner *Owner `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ metadataGetObjectACLOutput `json:"-" xml:"-"`
+}
+
+type metadataGetObjectACLOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// GetObjectInput holds the parameters for the S3 GetObject operation,
+// including conditional-request headers, range requests, response-header
+// overrides, and customer-provided encryption key headers.
+type GetObjectInput struct {
+ // Name of the bucket containing the object (URI path element). Required.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified,
+ // otherwise return a 412 (precondition failed).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Key of the object to retrieve (URI path element). Required.
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+ // Sets the Content-Disposition header of the response
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Specifies the algorithm to use to when encrypting the object (e.g., AES256,
+ // aws:kms).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionID *string `location:"querystring" locationName:"versionId" type:"string"`
+
+ metadataGetObjectInput `json:"-" xml:"-"`
+}
+
+type metadataGetObjectInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type GetObjectOutput struct {
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Object data.
+ Body io.ReadCloser `type:"blob"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+ // The portion of the object returned in the response.
+ ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Last modified date of the object
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ // Provides information about object restoration operation and expiration time
+ // of the restored object copy.
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+ // Version of the object.
+ VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+
+ metadataGetObjectOutput `json:"-" xml:"-"`
+}
+
+type metadataGetObjectOutput struct {
+ SDKShapeTraits bool `type:"structure" payload:"Body"`
+}
+
+type GetObjectTorrentInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ metadataGetObjectTorrentInput `json:"-" xml:"-"`
+}
+
+type metadataGetObjectTorrentInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type GetObjectTorrentOutput struct {
+ Body io.ReadCloser `type:"blob"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ metadataGetObjectTorrentOutput `json:"-" xml:"-"`
+}
+
+type metadataGetObjectTorrentOutput struct {
+ SDKShapeTraits bool `type:"structure" payload:"Body"`
+}
+
+type Grant struct {
+ Grantee *Grantee `type:"structure"`
+
+ // Specifies the permission given to the grantee.
+ Permission *string `type:"string"`
+
+ metadataGrant `json:"-" xml:"-"`
+}
+
+type metadataGrant struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type Grantee struct {
+ // Screen name of the grantee.
+ DisplayName *string `type:"string"`
+
+ // Email address of the grantee.
+ EmailAddress *string `type:"string"`
+
+ // The canonical user ID of the grantee.
+ ID *string `type:"string"`
+
+ // Type of grantee
+ Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true"`
+
+ // URI of the grantee group.
+ URI *string `type:"string"`
+
+ metadataGrantee `json:"-" xml:"-"`
+}
+
+type metadataGrantee struct {
+ SDKShapeTraits bool `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
+}
+
+type HeadBucketInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ metadataHeadBucketInput `json:"-" xml:"-"`
+}
+
+type metadataHeadBucketInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type HeadBucketOutput struct {
+ metadataHeadBucketOutput `json:"-" xml:"-"`
+}
+
+type metadataHeadBucketOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type HeadObjectInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified,
+ // otherwise return a 412 (precondition failed).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256,
+ // aws:kms).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionID *string `location:"querystring" locationName:"versionId" type:"string"`
+
+ metadataHeadObjectInput `json:"-" xml:"-"`
+}
+
+type metadataHeadObjectInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type HeadObjectOutput struct {
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Last modified date of the object
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ // Provides information about object restoration operation and expiration time
+ // of the restored object copy.
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+ // Version of the object.
+ VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+
+ metadataHeadObjectOutput `json:"-" xml:"-"`
+}
+
+type metadataHeadObjectOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type IndexDocument struct {
+ // A suffix that is appended to a request that is for a directory on the website
+ // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/
+ // the data that is returned will be for the object with the key name images/index.html)
+ // The suffix must not be empty and must not include a slash character.
+ Suffix *string `type:"string" required:"true"`
+
+ metadataIndexDocument `json:"-" xml:"-"`
+}
+
+type metadataIndexDocument struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type Initiator struct {
+ // Name of the Principal.
+ DisplayName *string `type:"string"`
+
+ // If the principal is an AWS account, it provides the Canonical User ID. If
+ // the principal is an IAM User, it provides a user ARN value.
+ ID *string `type:"string"`
+
+ metadataInitiator `json:"-" xml:"-"`
+}
+
+type metadataInitiator struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Container for specifying the AWS Lambda notification configuration.
+type LambdaFunctionConfiguration struct {
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ ID *string `locationName:"Id" type:"string"`
+
+ // Lambda cloud function ARN that Amazon S3 can invoke when it detects events
+ // of the specified type.
+ LambdaFunctionARN *string `locationName:"CloudFunction" type:"string" required:"true"`
+
+ metadataLambdaFunctionConfiguration `json:"-" xml:"-"`
+}
+
+type metadataLambdaFunctionConfiguration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type LifecycleConfiguration struct {
+ Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+
+ metadataLifecycleConfiguration `json:"-" xml:"-"`
+}
+
+type metadataLifecycleConfiguration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type LifecycleExpiration struct {
+ // Indicates at what date the object is to be moved or deleted. Should be in
+ // GMT ISO 8601 Format.
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Indicates the lifetime, in days, of the objects that are subject to the rule.
+ // The value must be a non-zero positive integer.
+ Days *int64 `type:"integer"`
+
+ metadataLifecycleExpiration `json:"-" xml:"-"`
+}
+
+type metadataLifecycleExpiration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type LifecycleRule struct {
+ Expiration *LifecycleExpiration `type:"structure"`
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Specifies when noncurrent object versions expire. Upon expiration, Amazon
+ // S3 permanently deletes the noncurrent object versions. You set this lifecycle
+ // configuration action on a bucket that has versioning enabled (or suspended)
+ // to request that Amazon S3 delete noncurrent object versions at a specific
+ // period in the object's lifetime.
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+ // Container for the transition rule that describes when noncurrent objects
+ // transition to the GLACIER storage class. If your bucket is versioning-enabled
+ // (or versioning is suspended), you can set this action to request that Amazon
+ // S3 transition noncurrent object versions to the GLACIER storage class at
+ // a specific period in the object's lifetime.
+ NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
+
+ // Prefix identifying one or more objects to which the rule applies.
+ Prefix *string `type:"string" required:"true"`
+
+ // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
+ // is not currently being applied.
+ Status *string `type:"string" required:"true"`
+
+ Transition *Transition `type:"structure"`
+
+ metadataLifecycleRule `json:"-" xml:"-"`
+}
+
+type metadataLifecycleRule struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type ListBucketsInput struct {
+ metadataListBucketsInput `json:"-" xml:"-"`
+}
+
+type metadataListBucketsInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type ListBucketsOutput struct {
+ Buckets []*Bucket `locationNameList:"Bucket" type:"list"`
+
+ Owner *Owner `type:"structure"`
+
+ metadataListBucketsOutput `json:"-" xml:"-"`
+}
+
+type metadataListBucketsOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type ListMultipartUploadsInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string"`
+
+ // Together with upload-id-marker, this parameter specifies the multipart upload
+ // after which listing should begin.
+ KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+ // Sets the maximum number of multipart uploads, from 1 to 1,000, to return
+ // in the response body. 1,000 is the maximum number of uploads that can be
+ // returned in a response.
+ MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"`
+
+ // Lists in-progress uploads only for those keys that begin with the specified
+ // prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Together with key-marker, specifies the multipart upload after which listing
+ // should begin. If key-marker is not specified, the upload-id-marker parameter
+ // is ignored.
+ UploadIDMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"`
+
+ metadataListMultipartUploadsInput `json:"-" xml:"-"`
+}
+
+type metadataListMultipartUploadsInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type ListMultipartUploadsOutput struct {
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string"`
+
+ // Indicates whether the returned list of multipart uploads is truncated. A
+ // value of true indicates that the list was truncated. The list can be truncated
+ // if the number of multipart uploads exceeds the limit allowed or specified
+ // by max uploads.
+ IsTruncated *bool `type:"boolean"`
+
+ // The key at or after which the listing began.
+ KeyMarker *string `type:"string"`
+
+ // Maximum number of multipart uploads that could have been included in the
+ // response.
+ MaxUploads *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the value that should be
+ // used for the key-marker request parameter in a subsequent request.
+ NextKeyMarker *string `type:"string"`
+
+ // When a list is truncated, this element specifies the value that should be
+ // used for the upload-id-marker request parameter in a subsequent request.
+ NextUploadIDMarker *string `locationName:"NextUploadIdMarker" type:"string"`
+
+ // When a prefix is provided in the request, this field contains the specified
+ // prefix. The result contains only keys starting with the specified prefix.
+ Prefix *string `type:"string"`
+
+ // Upload ID after which listing began.
+ UploadIDMarker *string `locationName:"UploadIdMarker" type:"string"`
+
+ Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
+
+ metadataListMultipartUploadsOutput `json:"-" xml:"-"`
+}
+
+type metadataListMultipartUploadsOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type ListObjectVersionsInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string"`
+
+ // Specifies the key to start with when listing objects in a bucket.
+ KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Specifies the object version you want to start listing from.
+ VersionIDMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"`
+
+ metadataListObjectVersionsInput `json:"-" xml:"-"`
+}
+
+type metadataListObjectVersionsInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type ListObjectVersionsOutput struct {
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria. If your results were truncated, you can
+ // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
+ // response parameters as a starting place in another request to return the
+ // rest of the results.
+ IsTruncated *bool `type:"boolean"`
+
+ // Marks the last Key returned in a truncated response.
+ KeyMarker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // Use this value for the key marker request parameter in a subsequent request.
+ NextKeyMarker *string `type:"string"`
+
+ // Use this value for the next version id marker parameter in a subsequent request.
+ NextVersionIDMarker *string `locationName:"NextVersionIdMarker" type:"string"`
+
+ Prefix *string `type:"string"`
+
+ VersionIDMarker *string `locationName:"VersionIdMarker" type:"string"`
+
+ Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"`
+
+ metadataListObjectVersionsOutput `json:"-" xml:"-"`
+}
+
+type metadataListObjectVersionsOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type ListObjectsInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string"`
+
+ // Specifies the key to start with when listing objects in a bucket.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ metadataListObjectsInput `json:"-" xml:"-"`
+}
+
+type metadataListObjectsInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type ListObjectsOutput struct {
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ Contents []*Object `type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ Marker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // When response is truncated (the IsTruncated element value in the response
+ // is true), you can use the key name in this field as marker in the subsequent
+ // request to get next set of objects. Amazon S3 lists objects in alphabetical
+ // order Note: This element is returned only if you have delimiter request parameter
+ // specified. If response does not include the NextMarker and it is truncated,
+ // you can use the value of the last Key in the response as the marker in the
+ // subsequent request to get the next set of object keys.
+ NextMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+
+ metadataListObjectsOutput `json:"-" xml:"-"`
+}
+
+type metadataListObjectsOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type ListPartsInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // Sets the maximum number of parts to return.
+ MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+
+ metadataListPartsInput `json:"-" xml:"-"`
+}
+
+type metadataListPartsInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// ListPartsOutput contains the response fields returned by the ListParts operation.
+type ListPartsOutput struct {
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator `type:"structure"`
+
+ // Indicates whether the returned list of parts is truncated.
+ IsTruncated *bool `type:"boolean"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `type:"string"`
+
+ // Maximum number of parts that were allowed in the response.
+ MaxParts *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the last part in the list,
+ // as well as the value to use for the part-number-marker request parameter
+ // in a subsequent request.
+ NextPartNumberMarker *int64 `type:"integer"`
+
+ Owner *Owner `type:"structure"`
+
+ // Part number after which listing begins.
+ PartNumberMarker *int64 `type:"integer"`
+
+ Parts []*Part `locationName:"Part" type:"list" flattened:"true"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ UploadID *string `locationName:"UploadId" type:"string"`
+
+ metadataListPartsOutput `json:"-" xml:"-"`
+}
+
+// metadataListPartsOutput carries the SDK marshaling traits for ListPartsOutput.
+type metadataListPartsOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// LoggingEnabled describes where (and under what key prefix) Amazon S3 should
+// deliver server access logs for a bucket.
+type LoggingEnabled struct {
+ // Specifies the bucket where you want Amazon S3 to store server access logs.
+ // You can have your logs delivered to any bucket that you own, including the
+ // same bucket that is being logged. You can also configure multiple buckets
+ // to deliver their logs to the same target bucket. In this case you should
+ // choose a different TargetPrefix for each source bucket so that the delivered
+ // log files can be distinguished by key.
+ TargetBucket *string `type:"string"`
+
+ TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"`
+
+ // This element lets you specify a prefix for the keys that the log files will
+ // be stored under.
+ TargetPrefix *string `type:"string"`
+
+ metadataLoggingEnabled `json:"-" xml:"-"`
+}
+
+// metadataLoggingEnabled carries the SDK marshaling traits for LoggingEnabled.
+type metadataLoggingEnabled struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// MultipartUpload describes an in-progress multipart upload (key, upload ID,
+// initiator, owner, storage class, and start time).
+type MultipartUpload struct {
+ // Date and time at which the multipart upload was initiated.
+ Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator `type:"structure"`
+
+ // Key of the object for which the multipart upload was initiated.
+ Key *string `type:"string"`
+
+ Owner *Owner `type:"structure"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string"`
+
+ // Upload ID that identifies the multipart upload.
+ UploadID *string `locationName:"UploadId" type:"string"`
+
+ metadataMultipartUpload `json:"-" xml:"-"`
+}
+
+// metadataMultipartUpload carries the SDK marshaling traits for MultipartUpload.
+type metadataMultipartUpload struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Specifies when noncurrent object versions expire. Upon expiration, Amazon
+// S3 permanently deletes the noncurrent object versions. You set this lifecycle
+// configuration action on a bucket that has versioning enabled (or suspended)
+// to request that Amazon S3 delete noncurrent object versions at a specific
+// period in the object's lifetime.
+type NoncurrentVersionExpiration struct {
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. For information about the noncurrent days
+ // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
+ // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage
+ // Service Developer Guide.
+ NoncurrentDays *int64 `type:"integer"`
+
+ metadataNoncurrentVersionExpiration `json:"-" xml:"-"`
+}
+
+// metadataNoncurrentVersionExpiration carries the SDK marshaling traits for
+// NoncurrentVersionExpiration.
+type metadataNoncurrentVersionExpiration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Container for the transition rule that describes when noncurrent objects
+// transition to the GLACIER storage class. If your bucket is versioning-enabled
+// (or versioning is suspended), you can set this action to request that Amazon
+// S3 transition noncurrent object versions to the GLACIER storage class at
+// a specific period in the object's lifetime.
+type NoncurrentVersionTransition struct {
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. For information about the noncurrent days
+ // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
+ // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage
+ // Service Developer Guide.
+ NoncurrentDays *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string"`
+
+ metadataNoncurrentVersionTransition `json:"-" xml:"-"`
+}
+
+// metadataNoncurrentVersionTransition carries the SDK marshaling traits for
+// NoncurrentVersionTransition.
+type metadataNoncurrentVersionTransition struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Container for specifying the notification configuration of the bucket. If
+// this element is empty, notifications are turned off on the bucket.
+type NotificationConfiguration struct {
+ // NOTE(review): the wire name "CloudFunctionConfiguration" differs from the
+ // field name — presumably the legacy element name S3 uses for Lambda
+ // configurations; confirm against the S3 API model before changing.
+ LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"`
+
+ QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"`
+
+ TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"`
+
+ metadataNotificationConfiguration `json:"-" xml:"-"`
+}
+
+// metadataNotificationConfiguration carries the SDK marshaling traits for
+// NotificationConfiguration.
+type metadataNotificationConfiguration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// NotificationConfigurationDeprecated is the legacy single-target form of the
+// bucket notification configuration (one configuration per destination type).
+type NotificationConfigurationDeprecated struct {
+ CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"`
+
+ QueueConfiguration *QueueConfigurationDeprecated `type:"structure"`
+
+ TopicConfiguration *TopicConfigurationDeprecated `type:"structure"`
+
+ metadataNotificationConfigurationDeprecated `json:"-" xml:"-"`
+}
+
+// metadataNotificationConfigurationDeprecated carries the SDK marshaling traits
+// for NotificationConfigurationDeprecated.
+type metadataNotificationConfigurationDeprecated struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Object describes an S3 object: its key, ETag, size, owner, storage class,
+// and last-modified time.
+type Object struct {
+ ETag *string `type:"string"`
+
+ Key *string `type:"string"`
+
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Owner *Owner `type:"structure"`
+
+ Size *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string"`
+
+ metadataObject `json:"-" xml:"-"`
+}
+
+// metadataObject carries the SDK marshaling traits for Object.
+type metadataObject struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// ObjectIdentifier names a single object (and optionally a specific version)
+// to delete.
+type ObjectIdentifier struct {
+ // Key name of the object to delete.
+ Key *string `type:"string" required:"true"`
+
+ // VersionId for the specific version of the object to delete.
+ VersionID *string `locationName:"VersionId" type:"string"`
+
+ metadataObjectIdentifier `json:"-" xml:"-"`
+}
+
+// metadataObjectIdentifier carries the SDK marshaling traits for ObjectIdentifier.
+type metadataObjectIdentifier struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// ObjectVersion describes one version of an object in a versioned bucket.
+type ObjectVersion struct {
+ ETag *string `type:"string"`
+
+ // Specifies whether the object is (true) or is not (false) the latest version
+ // of an object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `type:"string"`
+
+ // Date and time the object was last modified.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Owner *Owner `type:"structure"`
+
+ // Size in bytes of the object.
+ Size *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string"`
+
+ // Version ID of an object.
+ VersionID *string `locationName:"VersionId" type:"string"`
+
+ metadataObjectVersion `json:"-" xml:"-"`
+}
+
+// metadataObjectVersion carries the SDK marshaling traits for ObjectVersion.
+type metadataObjectVersion struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Owner identifies a bucket or object owner by canonical ID and display name.
+type Owner struct {
+ DisplayName *string `type:"string"`
+
+ ID *string `type:"string"`
+
+ metadataOwner `json:"-" xml:"-"`
+}
+
+// metadataOwner carries the SDK marshaling traits for Owner.
+type metadataOwner struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Part describes a single part of a multipart upload.
+type Part struct {
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Date and time at which the part was uploaded.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Part number identifying the part.
+ PartNumber *int64 `type:"integer"`
+
+ // Size of the uploaded part data.
+ Size *int64 `type:"integer"`
+
+ metadataPart `json:"-" xml:"-"`
+}
+
+// metadataPart carries the SDK marshaling traits for Part.
+type metadataPart struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutBucketACLInput carries the parameters for the PutBucketAcl operation.
+type PutBucketACLInput struct {
+ // The canned ACL to apply to the bucket.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
+
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ metadataPutBucketACLInput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketACLInput marks AccessControlPolicy as the request payload.
+type metadataPutBucketACLInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"AccessControlPolicy"`
+}
+
+// PutBucketACLOutput is the (empty) response of the PutBucketAcl operation.
+type PutBucketACLOutput struct {
+ metadataPutBucketACLOutput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketACLOutput carries the SDK marshaling traits for PutBucketACLOutput.
+type metadataPutBucketACLOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutBucketCORSInput carries the parameters for the PutBucketCors operation.
+type PutBucketCORSInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure"`
+
+ metadataPutBucketCORSInput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketCORSInput marks CORSConfiguration as the request payload.
+type metadataPutBucketCORSInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"CORSConfiguration"`
+}
+
+// PutBucketCORSOutput is the (empty) response of the PutBucketCors operation.
+type PutBucketCORSOutput struct {
+ metadataPutBucketCORSOutput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketCORSOutput carries the SDK marshaling traits for PutBucketCORSOutput.
+type metadataPutBucketCORSOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutBucketLifecycleInput carries the parameters for the PutBucketLifecycle operation.
+type PutBucketLifecycleInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
+
+ metadataPutBucketLifecycleInput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketLifecycleInput marks LifecycleConfiguration as the request payload.
+type metadataPutBucketLifecycleInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"LifecycleConfiguration"`
+}
+
+// PutBucketLifecycleOutput is the (empty) response of the PutBucketLifecycle operation.
+type PutBucketLifecycleOutput struct {
+ metadataPutBucketLifecycleOutput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketLifecycleOutput carries the SDK marshaling traits for PutBucketLifecycleOutput.
+type metadataPutBucketLifecycleOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutBucketLoggingInput carries the parameters for the PutBucketLogging operation.
+type PutBucketLoggingInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"`
+
+ metadataPutBucketLoggingInput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketLoggingInput marks BucketLoggingStatus as the request payload.
+type metadataPutBucketLoggingInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"BucketLoggingStatus"`
+}
+
+// PutBucketLoggingOutput is the (empty) response of the PutBucketLogging operation.
+type PutBucketLoggingOutput struct {
+ metadataPutBucketLoggingOutput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketLoggingOutput carries the SDK marshaling traits for PutBucketLoggingOutput.
+type metadataPutBucketLoggingOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutBucketNotificationConfigurationInput carries the parameters for the
+// PutBucketNotificationConfiguration operation.
+type PutBucketNotificationConfigurationInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Container for specifying the notification configuration of the bucket. If
+ // this element is empty, notifications are turned off on the bucket.
+ NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"`
+
+ metadataPutBucketNotificationConfigurationInput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketNotificationConfigurationInput marks NotificationConfiguration
+// as the request payload.
+type metadataPutBucketNotificationConfigurationInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"NotificationConfiguration"`
+}
+
+// PutBucketNotificationConfigurationOutput is the (empty) response of the
+// PutBucketNotificationConfiguration operation.
+type PutBucketNotificationConfigurationOutput struct {
+ metadataPutBucketNotificationConfigurationOutput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketNotificationConfigurationOutput carries the SDK marshaling
+// traits for PutBucketNotificationConfigurationOutput.
+type metadataPutBucketNotificationConfigurationOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutBucketNotificationInput carries the parameters for the legacy
+// PutBucketNotification operation (uses the deprecated configuration shape).
+type PutBucketNotificationInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"`
+
+ metadataPutBucketNotificationInput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketNotificationInput marks NotificationConfiguration as the request payload.
+type metadataPutBucketNotificationInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"NotificationConfiguration"`
+}
+
+// PutBucketNotificationOutput is the (empty) response of the PutBucketNotification operation.
+type PutBucketNotificationOutput struct {
+ metadataPutBucketNotificationOutput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketNotificationOutput carries the SDK marshaling traits for
+// PutBucketNotificationOutput.
+type metadataPutBucketNotificationOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutBucketPolicyInput carries the parameters for the PutBucketPolicy operation.
+type PutBucketPolicyInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The bucket policy as a JSON document.
+ Policy *string `type:"string" required:"true"`
+
+ metadataPutBucketPolicyInput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketPolicyInput marks Policy as the request payload.
+type metadataPutBucketPolicyInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"Policy"`
+}
+
+// PutBucketPolicyOutput is the (empty) response of the PutBucketPolicy operation.
+type PutBucketPolicyOutput struct {
+ metadataPutBucketPolicyOutput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketPolicyOutput carries the SDK marshaling traits for PutBucketPolicyOutput.
+type metadataPutBucketPolicyOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutBucketReplicationInput carries the parameters for the PutBucketReplication operation.
+type PutBucketReplicationInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Container for replication rules. You can add as many as 1,000 rules. Total
+ // replication configuration size can be up to 2 MB.
+ ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"`
+
+ metadataPutBucketReplicationInput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketReplicationInput marks ReplicationConfiguration as the request payload.
+type metadataPutBucketReplicationInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"ReplicationConfiguration"`
+}
+
+// PutBucketReplicationOutput is the (empty) response of the PutBucketReplication operation.
+type PutBucketReplicationOutput struct {
+ metadataPutBucketReplicationOutput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketReplicationOutput carries the SDK marshaling traits for
+// PutBucketReplicationOutput.
+type metadataPutBucketReplicationOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutBucketRequestPaymentInput carries the parameters for the PutBucketRequestPayment operation.
+type PutBucketRequestPaymentInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"`
+
+ metadataPutBucketRequestPaymentInput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketRequestPaymentInput marks RequestPaymentConfiguration as the request payload.
+type metadataPutBucketRequestPaymentInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"RequestPaymentConfiguration"`
+}
+
+// PutBucketRequestPaymentOutput is the (empty) response of the PutBucketRequestPayment operation.
+type PutBucketRequestPaymentOutput struct {
+ metadataPutBucketRequestPaymentOutput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketRequestPaymentOutput carries the SDK marshaling traits for
+// PutBucketRequestPaymentOutput.
+type metadataPutBucketRequestPaymentOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutBucketTaggingInput carries the parameters for the PutBucketTagging operation.
+type PutBucketTaggingInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
+
+ metadataPutBucketTaggingInput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketTaggingInput marks Tagging as the request payload.
+type metadataPutBucketTaggingInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"Tagging"`
+}
+
+// PutBucketTaggingOutput is the (empty) response of the PutBucketTagging operation.
+type PutBucketTaggingOutput struct {
+ metadataPutBucketTaggingOutput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketTaggingOutput carries the SDK marshaling traits for PutBucketTaggingOutput.
+type metadataPutBucketTaggingOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutBucketVersioningInput carries the parameters for the PutBucketVersioning operation.
+type PutBucketVersioningInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"`
+
+ metadataPutBucketVersioningInput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketVersioningInput marks VersioningConfiguration as the request payload.
+type metadataPutBucketVersioningInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"VersioningConfiguration"`
+}
+
+// PutBucketVersioningOutput is the (empty) response of the PutBucketVersioning operation.
+type PutBucketVersioningOutput struct {
+ metadataPutBucketVersioningOutput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketVersioningOutput carries the SDK marshaling traits for
+// PutBucketVersioningOutput.
+type metadataPutBucketVersioningOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutBucketWebsiteInput carries the parameters for the PutBucketWebsite operation.
+type PutBucketWebsiteInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"`
+
+ metadataPutBucketWebsiteInput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketWebsiteInput marks WebsiteConfiguration as the request payload.
+type metadataPutBucketWebsiteInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"WebsiteConfiguration"`
+}
+
+// PutBucketWebsiteOutput is the (empty) response of the PutBucketWebsite operation.
+type PutBucketWebsiteOutput struct {
+ metadataPutBucketWebsiteOutput `json:"-" xml:"-"`
+}
+
+// metadataPutBucketWebsiteOutput carries the SDK marshaling traits for PutBucketWebsiteOutput.
+type metadataPutBucketWebsiteOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutObjectACLInput carries the parameters for the PutObjectAcl operation.
+type PutObjectACLInput struct {
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
+
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ metadataPutObjectACLInput `json:"-" xml:"-"`
+}
+
+// metadataPutObjectACLInput marks AccessControlPolicy as the request payload.
+type metadataPutObjectACLInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"AccessControlPolicy"`
+}
+
+// PutObjectACLOutput is the response of the PutObjectAcl operation.
+type PutObjectACLOutput struct {
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ metadataPutObjectACLOutput `json:"-" xml:"-"`
+}
+
+// metadataPutObjectACLOutput carries the SDK marshaling traits for PutObjectACLOutput.
+type metadataPutObjectACLOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// PutObjectInput carries the parameters for the PutObject operation. Body is
+// the object data; most remaining fields map to request headers or the URI.
+type PutObjectInput struct {
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
+
+ // Object data.
+ Body io.ReadSeeker `type:"blob"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ // Specifies the algorithm to use to when encrypting the object (e.g., AES256,
+ // aws:kms).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+
+ metadataPutObjectInput `json:"-" xml:"-"`
+}
+
+// metadataPutObjectInput marks Body as the request payload.
+type metadataPutObjectInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"Body"`
+}
+
+// PutObjectOutput contains the response headers returned by the PutObject operation.
+type PutObjectOutput struct {
+ // Entity tag for the uploaded object.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured, this will contain the expiration
+ // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+ // Version of the object.
+ VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ metadataPutObjectOutput `json:"-" xml:"-"`
+}
+
+// metadataPutObjectOutput carries the SDK marshaling traits for PutObjectOutput.
+type metadataPutObjectOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Container for specifying a configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+type QueueConfiguration struct {
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ ID *string `locationName:"Id" type:"string"`
+
+ // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
+ // events of specified type.
+ QueueARN *string `locationName:"Queue" type:"string" required:"true"`
+
+ metadataQueueConfiguration `json:"-" xml:"-"`
+}
+
+// metadataQueueConfiguration carries the SDK marshaling traits for QueueConfiguration.
+type metadataQueueConfiguration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// QueueConfigurationDeprecated is the legacy SQS notification configuration shape.
+type QueueConfigurationDeprecated struct {
+ // Bucket event for which to send notifications.
+ Event *string `type:"string"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ ID *string `locationName:"Id" type:"string"`
+
+ Queue *string `type:"string"`
+
+ metadataQueueConfigurationDeprecated `json:"-" xml:"-"`
+}
+
+// metadataQueueConfigurationDeprecated carries the SDK marshaling traits for
+// QueueConfigurationDeprecated.
+type metadataQueueConfigurationDeprecated struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Redirect describes how a website routing rule rewrites or redirects a request.
+type Redirect struct {
+ // The HTTP redirect code to use on the response. Not required if one of the
+ // siblings is present.
+ HTTPRedirectCode *string `locationName:"HttpRedirectCode" type:"string"`
+
+ // The host name to use in the redirect request.
+ HostName *string `type:"string"`
+
+ // Protocol to use (http, https) when redirecting requests. The default is the
+ // protocol that is used in the original request.
+ Protocol *string `type:"string"`
+
+ // The object key prefix to use in the redirect request. For example, to redirect
+ // requests for all pages with prefix docs/ (objects in the docs/ folder) to
+ // documents/, you can set a condition block with KeyPrefixEquals set to docs/
+ // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+ // if one of the siblings is present. Can be present only if ReplaceKeyWith
+ // is not provided.
+ ReplaceKeyPrefixWith *string `type:"string"`
+
+ // The specific object key to use in the redirect request. For example, redirect
+ // request to error.html. Not required if one of the sibling is present. Can
+ // be present only if ReplaceKeyPrefixWith is not provided.
+ ReplaceKeyWith *string `type:"string"`
+
+ metadataRedirect `json:"-" xml:"-"`
+}
+
+// metadataRedirect carries the SDK marshaling traits for Redirect.
+type metadataRedirect struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// RedirectAllRequestsTo redirects every request for this website endpoint to
+// the specified host (and optional protocol).
+type RedirectAllRequestsTo struct {
+ // Name of the host where requests will be redirected.
+ HostName *string `type:"string" required:"true"`
+
+ // Protocol to use (http, https) when redirecting requests. The default is the
+ // protocol that is used in the original request.
+ Protocol *string `type:"string"`
+
+ metadataRedirectAllRequestsTo `json:"-" xml:"-"`
+}
+
+// metadataRedirectAllRequestsTo carries the SDK marshaling traits for RedirectAllRequestsTo.
+type metadataRedirectAllRequestsTo struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Container for replication rules. You can add as many as 1,000 rules. Total
+// replication configuration size can be up to 2 MB.
+type ReplicationConfiguration struct {
+ // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating
+ // the objects.
+ Role *string `type:"string" required:"true"`
+
+ // Container for information about a particular replication rule. Replication
+ // configuration must have at least one rule and can contain up to 1,000 rules.
+ Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+
+ metadataReplicationConfiguration `json:"-" xml:"-"`
+}
+
+// metadataReplicationConfiguration carries the SDK marshaling traits for
+// ReplicationConfiguration.
+type metadataReplicationConfiguration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// ReplicationRule describes one replication rule: which key prefix it applies
+// to, where objects are replicated, and whether the rule is enabled.
+type ReplicationRule struct {
+ Destination *Destination `type:"structure" required:"true"`
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Object keyname prefix identifying one or more objects to which the rule applies.
+ // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes
+ // are not supported.
+ Prefix *string `type:"string" required:"true"`
+
+ // The rule is ignored if status is not Enabled.
+ Status *string `type:"string" required:"true"`
+
+ metadataReplicationRule `json:"-" xml:"-"`
+}
+
+// metadataReplicationRule carries the SDK marshaling traits for ReplicationRule.
+type metadataReplicationRule struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// RequestPaymentConfiguration specifies who pays for bucket downloads and requests.
+type RequestPaymentConfiguration struct {
+ // Specifies who pays for the download and request fees.
+ Payer *string `type:"string" required:"true"`
+
+ metadataRequestPaymentConfiguration `json:"-" xml:"-"`
+}
+
+// metadataRequestPaymentConfiguration carries the SDK marshaling traits for
+// RequestPaymentConfiguration.
+type metadataRequestPaymentConfiguration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// RestoreObjectInput carries the parameters for the RestoreObject operation.
+type RestoreObjectInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"`
+
+ VersionID *string `location:"querystring" locationName:"versionId" type:"string"`
+
+ metadataRestoreObjectInput `json:"-" xml:"-"`
+}
+
+// metadataRestoreObjectInput marks RestoreRequest as the request payload.
+type metadataRestoreObjectInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"RestoreRequest"`
+}
+
+// RestoreObjectOutput is the response of the RestoreObject operation.
+type RestoreObjectOutput struct {
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ metadataRestoreObjectOutput `json:"-" xml:"-"`
+}
+
+// metadataRestoreObjectOutput carries the SDK marshaling traits for RestoreObjectOutput.
+type metadataRestoreObjectOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type RestoreRequest struct {
+ // Lifetime of the active copy in days
+ Days *int64 `type:"integer" required:"true"`
+
+ metadataRestoreRequest `json:"-" xml:"-"`
+}
+
+type metadataRestoreRequest struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type RoutingRule struct {
+ // A container for describing a condition that must be met for the specified
+ // redirect to apply. For example, 1. If request is for pages in the /docs folder,
+ // redirect to the /documents folder. 2. If request results in HTTP error 4xx,
+ // redirect request to another host where you might process the error.
+ Condition *Condition `type:"structure"`
+
+ // Container for redirect information. You can redirect requests to another
+ // host, to another page, or with another protocol. In the event of an error,
+ // you can can specify a different error code to return.
+ Redirect *Redirect `type:"structure" required:"true"`
+
+ metadataRoutingRule `json:"-" xml:"-"`
+}
+
+type metadataRoutingRule struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type Tag struct {
+ // Name of the tag.
+ Key *string `type:"string" required:"true"`
+
+ // Value of the tag.
+ Value *string `type:"string" required:"true"`
+
+ metadataTag `json:"-" xml:"-"`
+}
+
+type metadataTag struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type Tagging struct {
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+
+ metadataTagging `json:"-" xml:"-"`
+}
+
+type metadataTagging struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type TargetGrant struct {
+ Grantee *Grantee `type:"structure"`
+
+ // Logging permissions assigned to the Grantee for the bucket.
+ Permission *string `type:"string"`
+
+ metadataTargetGrant `json:"-" xml:"-"`
+}
+
+type metadataTargetGrant struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// Container for specifying the configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Notification Service (Amazon SNS) topic.
+type TopicConfiguration struct {
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ ID *string `locationName:"Id" type:"string"`
+
+ // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects
+ // events of specified type.
+ TopicARN *string `locationName:"Topic" type:"string" required:"true"`
+
+ metadataTopicConfiguration `json:"-" xml:"-"`
+}
+
+type metadataTopicConfiguration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type TopicConfigurationDeprecated struct {
+ // Bucket event for which to send notifications.
+ Event *string `type:"string"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ ID *string `locationName:"Id" type:"string"`
+
+ // Amazon SNS topic to which Amazon S3 will publish a message to report the
+ // specified events for the bucket.
+ Topic *string `type:"string"`
+
+ metadataTopicConfigurationDeprecated `json:"-" xml:"-"`
+}
+
+type metadataTopicConfigurationDeprecated struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type Transition struct {
+ // Indicates at what date the object is to be moved or deleted. Should be in
+ // GMT ISO 8601 Format.
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Indicates the lifetime, in days, of the objects that are subject to the rule.
+ // The value must be a non-zero positive integer.
+ Days *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string"`
+
+ metadataTransition `json:"-" xml:"-"`
+}
+
+type metadataTransition struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type UploadPartCopyInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The name of the source bucket and key name of the source object, separated
+ // by a slash (/). Must be URL-encoded.
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // The range of bytes to copy from the source object. The range value must use
+ // the form bytes=first-last, where the first and last are the zero-based byte
+ // offsets to copy. For example, bytes=0-9 indicates that you want to copy the
+ // first ten bytes of the source. You can copy a range only if the source object
+ // is greater than 5 GB.
+ CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
+
+ // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // Part number of part being copied.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ // Specifies the algorithm to use to when encrypting the object (e.g., AES256,
+ // aws:kms).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being copied.
+ UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+
+ metadataUploadPartCopyInput `json:"-" xml:"-"`
+}
+
+type metadataUploadPartCopyInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type UploadPartCopyOutput struct {
+ CopyPartResult *CopyPartResult `type:"structure"`
+
+ // The version of the source object that was copied, if you have enabled versioning
+ // on the source bucket.
+ CopySourceVersionID *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+ metadataUploadPartCopyOutput `json:"-" xml:"-"`
+}
+
+type metadataUploadPartCopyOutput struct {
+ SDKShapeTraits bool `type:"structure" payload:"CopyPartResult"`
+}
+
+type UploadPartInput struct {
+ Body io.ReadSeeker `type:"blob"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // Part number of part being uploaded.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ // Specifies the algorithm to use to when encrypting the object (e.g., AES256,
+ // aws:kms).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being uploaded.
+ UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+
+ metadataUploadPartInput `json:"-" xml:"-"`
+}
+
+type metadataUploadPartInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"Body"`
+}
+
+type UploadPartOutput struct {
+ // Entity tag for the uploaded object.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+ metadataUploadPartOutput `json:"-" xml:"-"`
+}
+
+type metadataUploadPartOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type VersioningConfiguration struct {
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA
+ // delete. If the bucket has never been so configured, this element is not returned.
+ MFADelete *string `locationName:"MfaDelete" type:"string"`
+
+ // The versioning state of the bucket.
+ Status *string `type:"string"`
+
+ metadataVersioningConfiguration `json:"-" xml:"-"`
+}
+
+type metadataVersioningConfiguration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type WebsiteConfiguration struct {
+ ErrorDocument *ErrorDocument `type:"structure"`
+
+ IndexDocument *IndexDocument `type:"structure"`
+
+ RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+
+ metadataWebsiteConfiguration `json:"-" xml:"-"`
+}
+
+type metadataWebsiteConfiguration struct {
+ SDKShapeTraits bool `type:"structure"`
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
new file mode 100644
index 0000000..59eb845
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
@@ -0,0 +1,42 @@
+package s3
+
+import (
+ "io/ioutil"
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
+
+func buildGetBucketLocation(r *aws.Request) {
+ if r.DataFilled() {
+ out := r.Data.(*GetBucketLocationOutput)
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed reading response body", err)
+ return
+ }
+
+ match := reBucketLocation.FindSubmatch(b)
+ if len(match) > 1 {
+ loc := string(match[1])
+ out.LocationConstraint = &loc
+ }
+ }
+}
+
+func populateLocationConstraint(r *aws.Request) {
+ if r.ParamsFilled() && r.Config.Region != "us-east-1" {
+ in := r.Params.(*CreateBucketInput)
+ if in.CreateBucketConfiguration == nil {
+ r.Params = awsutil.CopyOf(r.Params)
+ in = r.Params.(*CreateBucketInput)
+ in.CreateBucketConfiguration = &CreateBucketConfiguration{
+ LocationConstraint: &r.Config.Region,
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go
new file mode 100644
index 0000000..f168854
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go
@@ -0,0 +1,75 @@
+package s3_test
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/internal/test/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ = unit.Imported
+var s3LocationTests = []struct {
+ body string
+ loc string
+}{
+ {``, ``},
+ {`EU`, `EU`},
+}
+
+func TestGetBucketLocation(t *testing.T) {
+ for _, test := range s3LocationTests {
+ s := s3.New(nil)
+ s.Handlers.Send.Clear()
+ s.Handlers.Send.PushBack(func(r *aws.Request) {
+ reader := ioutil.NopCloser(bytes.NewReader([]byte(test.body)))
+ r.HTTPResponse = &http.Response{StatusCode: 200, Body: reader}
+ })
+
+ resp, err := s.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String("bucket")})
+ assert.NoError(t, err)
+ if test.loc == "" {
+ assert.Nil(t, resp.LocationConstraint)
+ } else {
+ assert.Equal(t, test.loc, *resp.LocationConstraint)
+ }
+ }
+}
+
+func TestPopulateLocationConstraint(t *testing.T) {
+ s := s3.New(nil)
+ in := &s3.CreateBucketInput{
+ Bucket: aws.String("bucket"),
+ }
+ req, _ := s.CreateBucketRequest(in)
+ err := req.Build()
+ assert.NoError(t, err)
+ assert.Equal(t, "mock-region", awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")[0])
+ assert.Nil(t, in.CreateBucketConfiguration) // don't modify original params
+}
+
+func TestNoPopulateLocationConstraintIfProvided(t *testing.T) {
+ s := s3.New(nil)
+ req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{
+ Bucket: aws.String("bucket"),
+ CreateBucketConfiguration: &s3.CreateBucketConfiguration{},
+ })
+ err := req.Build()
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")))
+}
+
+func TestNoPopulateLocationConstraintIfClassic(t *testing.T) {
+ s := s3.New(&aws.Config{Region: "us-east-1"})
+ req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{
+ Bucket: aws.String("bucket"),
+ })
+ err := req.Build()
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")))
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go
new file mode 100644
index 0000000..386f09a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go
@@ -0,0 +1,36 @@
+package s3
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
+// require it.
+func contentMD5(r *aws.Request) {
+ h := md5.New()
+
+ // hash the body. seek back to the first position after reading to reset
+ // the body for transmission. copy errors may be assumed to be from the
+ // body.
+ _, err := io.Copy(h, r.Body)
+ if err != nil {
+ r.Error = awserr.New("ContentMD5", "failed to read body", err)
+ return
+ }
+ _, err = r.Body.Seek(0, 0)
+ if err != nil {
+ r.Error = awserr.New("ContentMD5", "failed to seek body", err)
+ return
+ }
+
+ // encode the md5 checksum in base64 and set the request header.
+ sum := h.Sum(nil)
+ sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+ base64.StdEncoding.Encode(sum64, sum)
+ r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go
new file mode 100644
index 0000000..490ff2d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -0,0 +1,32 @@
+package s3
+
+import "github.com/aws/aws-sdk-go/aws"
+
+func init() {
+ initService = func(s *aws.Service) {
+ // Support building custom host-style bucket endpoints
+ s.Handlers.Build.PushFront(updateHostWithBucket)
+
+ // Require SSL when using SSE keys
+ s.Handlers.Validate.PushBack(validateSSERequiresSSL)
+ s.Handlers.Build.PushBack(computeSSEKeys)
+
+ // S3 uses custom error unmarshaling logic
+ s.Handlers.UnmarshalError.Clear()
+ s.Handlers.UnmarshalError.PushBack(unmarshalError)
+ }
+
+ initRequest = func(r *aws.Request) {
+ switch r.Operation.Name {
+ case opPutBucketCORS, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects:
+ // These S3 operations require Content-MD5 to be set
+ r.Handlers.Build.PushBack(contentMD5)
+ case opGetBucketLocation:
+ // GetBucketLocation has custom parsing logic
+ r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+ case opCreateBucket:
+ // Auto-populate LocationConstraint with current region
+ r.Handlers.Validate.PushFront(populateLocationConstraint)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go
new file mode 100644
index 0000000..523b487
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go
@@ -0,0 +1,90 @@
+package s3_test
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "io/ioutil"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/internal/test/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ = unit.Imported
+
+func assertMD5(t *testing.T, req *aws.Request) {
+ err := req.Build()
+ assert.NoError(t, err)
+
+ b, _ := ioutil.ReadAll(req.HTTPRequest.Body)
+ out := md5.Sum(b)
+ assert.NotEmpty(t, b)
+ assert.Equal(t, base64.StdEncoding.EncodeToString(out[:]), req.HTTPRequest.Header.Get("Content-MD5"))
+}
+
+func TestMD5InPutBucketCORS(t *testing.T) {
+ svc := s3.New(nil)
+ req, _ := svc.PutBucketCORSRequest(&s3.PutBucketCORSInput{
+ Bucket: aws.String("bucketname"),
+ CORSConfiguration: &s3.CORSConfiguration{
+ CORSRules: []*s3.CORSRule{
+ {AllowedMethods: []*string{aws.String("GET")}},
+ },
+ },
+ })
+ assertMD5(t, req)
+}
+
+func TestMD5InPutBucketLifecycle(t *testing.T) {
+ svc := s3.New(nil)
+ req, _ := svc.PutBucketLifecycleRequest(&s3.PutBucketLifecycleInput{
+ Bucket: aws.String("bucketname"),
+ LifecycleConfiguration: &s3.LifecycleConfiguration{
+ Rules: []*s3.LifecycleRule{
+ {
+ ID: aws.String("ID"),
+ Prefix: aws.String("Prefix"),
+ Status: aws.String("Enabled"),
+ },
+ },
+ },
+ })
+ assertMD5(t, req)
+}
+
+func TestMD5InPutBucketPolicy(t *testing.T) {
+ svc := s3.New(nil)
+ req, _ := svc.PutBucketPolicyRequest(&s3.PutBucketPolicyInput{
+ Bucket: aws.String("bucketname"),
+ Policy: aws.String("{}"),
+ })
+ assertMD5(t, req)
+}
+
+func TestMD5InPutBucketTagging(t *testing.T) {
+ svc := s3.New(nil)
+ req, _ := svc.PutBucketTaggingRequest(&s3.PutBucketTaggingInput{
+ Bucket: aws.String("bucketname"),
+ Tagging: &s3.Tagging{
+ TagSet: []*s3.Tag{
+ {Key: aws.String("KEY"), Value: aws.String("VALUE")},
+ },
+ },
+ })
+ assertMD5(t, req)
+}
+
+func TestMD5InDeleteObjects(t *testing.T) {
+ svc := s3.New(nil)
+ req, _ := svc.DeleteObjectsRequest(&s3.DeleteObjectsInput{
+ Bucket: aws.String("bucketname"),
+ Delete: &s3.Delete{
+ Objects: []*s3.ObjectIdentifier{
+ {Key: aws.String("key")},
+ },
+ },
+ })
+ assertMD5(t, req)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go
new file mode 100644
index 0000000..7dde748
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go
@@ -0,0 +1,1928 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package s3_test
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+var _ time.Duration
+var _ bytes.Buffer
+
+func ExampleS3_AbortMultipartUpload() {
+ svc := s3.New(nil)
+
+ params := &s3.AbortMultipartUploadInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ UploadID: aws.String("MultipartUploadId"), // Required
+ RequestPayer: aws.String("RequestPayer"),
+ }
+ resp, err := svc.AbortMultipartUpload(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_CompleteMultipartUpload() {
+ svc := s3.New(nil)
+
+ params := &s3.CompleteMultipartUploadInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ UploadID: aws.String("MultipartUploadId"), // Required
+ MultipartUpload: &s3.CompletedMultipartUpload{
+ Parts: []*s3.CompletedPart{
+ { // Required
+ ETag: aws.String("ETag"),
+ PartNumber: aws.Long(1),
+ },
+ // More values...
+ },
+ },
+ RequestPayer: aws.String("RequestPayer"),
+ }
+ resp, err := svc.CompleteMultipartUpload(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_CopyObject() {
+ svc := s3.New(nil)
+
+ params := &s3.CopyObjectInput{
+ Bucket: aws.String("BucketName"), // Required
+ CopySource: aws.String("CopySource"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ ACL: aws.String("ObjectCannedACL"),
+ CacheControl: aws.String("CacheControl"),
+ ContentDisposition: aws.String("ContentDisposition"),
+ ContentEncoding: aws.String("ContentEncoding"),
+ ContentLanguage: aws.String("ContentLanguage"),
+ ContentType: aws.String("ContentType"),
+ CopySourceIfMatch: aws.String("CopySourceIfMatch"),
+ CopySourceIfModifiedSince: aws.Time(time.Now()),
+ CopySourceIfNoneMatch: aws.String("CopySourceIfNoneMatch"),
+ CopySourceIfUnmodifiedSince: aws.Time(time.Now()),
+ CopySourceSSECustomerAlgorithm: aws.String("CopySourceSSECustomerAlgorithm"),
+ CopySourceSSECustomerKey: aws.String("CopySourceSSECustomerKey"),
+ CopySourceSSECustomerKeyMD5: aws.String("CopySourceSSECustomerKeyMD5"),
+ Expires: aws.Time(time.Now()),
+ GrantFullControl: aws.String("GrantFullControl"),
+ GrantRead: aws.String("GrantRead"),
+ GrantReadACP: aws.String("GrantReadACP"),
+ GrantWriteACP: aws.String("GrantWriteACP"),
+ Metadata: map[string]*string{
+ "Key": aws.String("MetadataValue"), // Required
+ // More values...
+ },
+ MetadataDirective: aws.String("MetadataDirective"),
+ RequestPayer: aws.String("RequestPayer"),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ SSEKMSKeyID: aws.String("SSEKMSKeyId"),
+ ServerSideEncryption: aws.String("ServerSideEncryption"),
+ StorageClass: aws.String("StorageClass"),
+ WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"),
+ }
+ resp, err := svc.CopyObject(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_CreateBucket() {
+ svc := s3.New(nil)
+
+ params := &s3.CreateBucketInput{
+ Bucket: aws.String("BucketName"), // Required
+ ACL: aws.String("BucketCannedACL"),
+ CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+ LocationConstraint: aws.String("BucketLocationConstraint"),
+ },
+ GrantFullControl: aws.String("GrantFullControl"),
+ GrantRead: aws.String("GrantRead"),
+ GrantReadACP: aws.String("GrantReadACP"),
+ GrantWrite: aws.String("GrantWrite"),
+ GrantWriteACP: aws.String("GrantWriteACP"),
+ }
+ resp, err := svc.CreateBucket(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_CreateMultipartUpload() {
+ svc := s3.New(nil)
+
+ params := &s3.CreateMultipartUploadInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ ACL: aws.String("ObjectCannedACL"),
+ CacheControl: aws.String("CacheControl"),
+ ContentDisposition: aws.String("ContentDisposition"),
+ ContentEncoding: aws.String("ContentEncoding"),
+ ContentLanguage: aws.String("ContentLanguage"),
+ ContentType: aws.String("ContentType"),
+ Expires: aws.Time(time.Now()),
+ GrantFullControl: aws.String("GrantFullControl"),
+ GrantRead: aws.String("GrantRead"),
+ GrantReadACP: aws.String("GrantReadACP"),
+ GrantWriteACP: aws.String("GrantWriteACP"),
+ Metadata: map[string]*string{
+ "Key": aws.String("MetadataValue"), // Required
+ // More values...
+ },
+ RequestPayer: aws.String("RequestPayer"),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ SSEKMSKeyID: aws.String("SSEKMSKeyId"),
+ ServerSideEncryption: aws.String("ServerSideEncryption"),
+ StorageClass: aws.String("StorageClass"),
+ WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"),
+ }
+ resp, err := svc.CreateMultipartUpload(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_DeleteBucket() {
+ svc := s3.New(nil)
+
+ params := &s3.DeleteBucketInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucket(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_DeleteBucketCORS() {
+ svc := s3.New(nil)
+
+ params := &s3.DeleteBucketCORSInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucketCORS(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_DeleteBucketLifecycle() {
+ svc := s3.New(nil)
+
+ params := &s3.DeleteBucketLifecycleInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucketLifecycle(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_DeleteBucketPolicy() {
+ svc := s3.New(nil)
+
+ params := &s3.DeleteBucketPolicyInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucketPolicy(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_DeleteBucketReplication() {
+ svc := s3.New(nil)
+
+ params := &s3.DeleteBucketReplicationInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucketReplication(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_DeleteBucketTagging() {
+ svc := s3.New(nil)
+
+ params := &s3.DeleteBucketTaggingInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucketTagging(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_DeleteBucketWebsite() {
+ svc := s3.New(nil)
+
+ params := &s3.DeleteBucketWebsiteInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucketWebsite(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_DeleteObject() {
+ svc := s3.New(nil)
+
+ params := &s3.DeleteObjectInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ MFA: aws.String("MFA"),
+ RequestPayer: aws.String("RequestPayer"),
+ VersionID: aws.String("ObjectVersionId"),
+ }
+ resp, err := svc.DeleteObject(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_DeleteObjects() {
+ svc := s3.New(nil)
+
+ params := &s3.DeleteObjectsInput{
+ Bucket: aws.String("BucketName"), // Required
+ Delete: &s3.Delete{ // Required
+ Objects: []*s3.ObjectIdentifier{ // Required
+ { // Required
+ Key: aws.String("ObjectKey"), // Required
+ VersionID: aws.String("ObjectVersionId"),
+ },
+ // More values...
+ },
+ Quiet: aws.Boolean(true),
+ },
+ MFA: aws.String("MFA"),
+ RequestPayer: aws.String("RequestPayer"),
+ }
+ resp, err := svc.DeleteObjects(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketACL() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketACLInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketACL(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketCORS() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketCORSInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketCORS(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketLifecycle() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketLifecycleInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketLifecycle(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketLocation() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketLocationInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketLocation(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketLogging() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketLoggingInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketLogging(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketNotification() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketNotificationConfigurationRequest{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketNotification(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketNotificationConfiguration() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketNotificationConfigurationRequest{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketNotificationConfiguration(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketPolicy() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketPolicyInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketPolicy(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketReplication() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketReplicationInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketReplication(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketRequestPayment() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketRequestPaymentInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketRequestPayment(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketTagging() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketTaggingInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketTagging(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketVersioning() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketVersioningInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketVersioning(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetBucketWebsite() {
+ svc := s3.New(nil)
+
+ params := &s3.GetBucketWebsiteInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketWebsite(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetObject() {
+ svc := s3.New(nil)
+
+ params := &s3.GetObjectInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ IfMatch: aws.String("IfMatch"),
+ IfModifiedSince: aws.Time(time.Now()),
+ IfNoneMatch: aws.String("IfNoneMatch"),
+ IfUnmodifiedSince: aws.Time(time.Now()),
+ Range: aws.String("Range"),
+ RequestPayer: aws.String("RequestPayer"),
+ ResponseCacheControl: aws.String("ResponseCacheControl"),
+ ResponseContentDisposition: aws.String("ResponseContentDisposition"),
+ ResponseContentEncoding: aws.String("ResponseContentEncoding"),
+ ResponseContentLanguage: aws.String("ResponseContentLanguage"),
+ ResponseContentType: aws.String("ResponseContentType"),
+ ResponseExpires: aws.Time(time.Now()),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ VersionID: aws.String("ObjectVersionId"),
+ }
+ resp, err := svc.GetObject(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetObjectACL() {
+ svc := s3.New(nil)
+
+ params := &s3.GetObjectACLInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ RequestPayer: aws.String("RequestPayer"),
+ VersionID: aws.String("ObjectVersionId"),
+ }
+ resp, err := svc.GetObjectACL(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_GetObjectTorrent() {
+ svc := s3.New(nil)
+
+ params := &s3.GetObjectTorrentInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ RequestPayer: aws.String("RequestPayer"),
+ }
+ resp, err := svc.GetObjectTorrent(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_HeadBucket() {
+ svc := s3.New(nil)
+
+ params := &s3.HeadBucketInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.HeadBucket(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_HeadObject() {
+ svc := s3.New(nil)
+
+ params := &s3.HeadObjectInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ IfMatch: aws.String("IfMatch"),
+ IfModifiedSince: aws.Time(time.Now()),
+ IfNoneMatch: aws.String("IfNoneMatch"),
+ IfUnmodifiedSince: aws.Time(time.Now()),
+ Range: aws.String("Range"),
+ RequestPayer: aws.String("RequestPayer"),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ VersionID: aws.String("ObjectVersionId"),
+ }
+ resp, err := svc.HeadObject(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_ListBuckets() {
+ svc := s3.New(nil)
+
+ var params *s3.ListBucketsInput
+ resp, err := svc.ListBuckets(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_ListMultipartUploads() {
+ svc := s3.New(nil)
+
+ params := &s3.ListMultipartUploadsInput{
+ Bucket: aws.String("BucketName"), // Required
+ Delimiter: aws.String("Delimiter"),
+ EncodingType: aws.String("EncodingType"),
+ KeyMarker: aws.String("KeyMarker"),
+ MaxUploads: aws.Long(1),
+ Prefix: aws.String("Prefix"),
+ UploadIDMarker: aws.String("UploadIdMarker"),
+ }
+ resp, err := svc.ListMultipartUploads(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_ListObjectVersions() {
+ svc := s3.New(nil)
+
+ params := &s3.ListObjectVersionsInput{
+ Bucket: aws.String("BucketName"), // Required
+ Delimiter: aws.String("Delimiter"),
+ EncodingType: aws.String("EncodingType"),
+ KeyMarker: aws.String("KeyMarker"),
+ MaxKeys: aws.Long(1),
+ Prefix: aws.String("Prefix"),
+ VersionIDMarker: aws.String("VersionIdMarker"),
+ }
+ resp, err := svc.ListObjectVersions(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_ListObjects() {
+ svc := s3.New(nil)
+
+ params := &s3.ListObjectsInput{
+ Bucket: aws.String("BucketName"), // Required
+ Delimiter: aws.String("Delimiter"),
+ EncodingType: aws.String("EncodingType"),
+ Marker: aws.String("Marker"),
+ MaxKeys: aws.Long(1),
+ Prefix: aws.String("Prefix"),
+ }
+ resp, err := svc.ListObjects(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_ListParts() {
+ svc := s3.New(nil)
+
+ params := &s3.ListPartsInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ UploadID: aws.String("MultipartUploadId"), // Required
+ MaxParts: aws.Long(1),
+ PartNumberMarker: aws.Long(1),
+ RequestPayer: aws.String("RequestPayer"),
+ }
+ resp, err := svc.ListParts(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutBucketACL() {
+ svc := s3.New(nil)
+
+ params := &s3.PutBucketACLInput{
+ Bucket: aws.String("BucketName"), // Required
+ ACL: aws.String("BucketCannedACL"),
+ AccessControlPolicy: &s3.AccessControlPolicy{
+ Grants: []*s3.Grant{
+ { // Required
+ Grantee: &s3.Grantee{
+ Type: aws.String("Type"), // Required
+ DisplayName: aws.String("DisplayName"),
+ EmailAddress: aws.String("EmailAddress"),
+ ID: aws.String("ID"),
+ URI: aws.String("URI"),
+ },
+ Permission: aws.String("Permission"),
+ },
+ // More values...
+ },
+ Owner: &s3.Owner{
+ DisplayName: aws.String("DisplayName"),
+ ID: aws.String("ID"),
+ },
+ },
+ GrantFullControl: aws.String("GrantFullControl"),
+ GrantRead: aws.String("GrantRead"),
+ GrantReadACP: aws.String("GrantReadACP"),
+ GrantWrite: aws.String("GrantWrite"),
+ GrantWriteACP: aws.String("GrantWriteACP"),
+ }
+ resp, err := svc.PutBucketACL(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutBucketCORS() {
+ svc := s3.New(nil)
+
+ params := &s3.PutBucketCORSInput{
+ Bucket: aws.String("BucketName"), // Required
+ CORSConfiguration: &s3.CORSConfiguration{
+ CORSRules: []*s3.CORSRule{
+ { // Required
+ AllowedHeaders: []*string{
+ aws.String("AllowedHeader"), // Required
+ // More values...
+ },
+ AllowedMethods: []*string{
+ aws.String("AllowedMethod"), // Required
+ // More values...
+ },
+ AllowedOrigins: []*string{
+ aws.String("AllowedOrigin"), // Required
+ // More values...
+ },
+ ExposeHeaders: []*string{
+ aws.String("ExposeHeader"), // Required
+ // More values...
+ },
+ MaxAgeSeconds: aws.Long(1),
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketCORS(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutBucketLifecycle() {
+ svc := s3.New(nil)
+
+ params := &s3.PutBucketLifecycleInput{
+ Bucket: aws.String("BucketName"), // Required
+ LifecycleConfiguration: &s3.LifecycleConfiguration{
+ Rules: []*s3.LifecycleRule{ // Required
+ { // Required
+ Prefix: aws.String("Prefix"), // Required
+ Status: aws.String("ExpirationStatus"), // Required
+ Expiration: &s3.LifecycleExpiration{
+ Date: aws.Time(time.Now()),
+ Days: aws.Long(1),
+ },
+ ID: aws.String("ID"),
+ NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
+ NoncurrentDays: aws.Long(1),
+ },
+ NoncurrentVersionTransition: &s3.NoncurrentVersionTransition{
+ NoncurrentDays: aws.Long(1),
+ StorageClass: aws.String("TransitionStorageClass"),
+ },
+ Transition: &s3.Transition{
+ Date: aws.Time(time.Now()),
+ Days: aws.Long(1),
+ StorageClass: aws.String("TransitionStorageClass"),
+ },
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketLifecycle(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutBucketLogging() {
+ svc := s3.New(nil)
+
+ params := &s3.PutBucketLoggingInput{
+ Bucket: aws.String("BucketName"), // Required
+ BucketLoggingStatus: &s3.BucketLoggingStatus{ // Required
+ LoggingEnabled: &s3.LoggingEnabled{
+ TargetBucket: aws.String("TargetBucket"),
+ TargetGrants: []*s3.TargetGrant{
+ { // Required
+ Grantee: &s3.Grantee{
+ Type: aws.String("Type"), // Required
+ DisplayName: aws.String("DisplayName"),
+ EmailAddress: aws.String("EmailAddress"),
+ ID: aws.String("ID"),
+ URI: aws.String("URI"),
+ },
+ Permission: aws.String("BucketLogsPermission"),
+ },
+ // More values...
+ },
+ TargetPrefix: aws.String("TargetPrefix"),
+ },
+ },
+ }
+ resp, err := svc.PutBucketLogging(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutBucketNotification() {
+ svc := s3.New(nil)
+
+ params := &s3.PutBucketNotificationInput{
+ Bucket: aws.String("BucketName"), // Required
+ NotificationConfiguration: &s3.NotificationConfigurationDeprecated{ // Required
+ CloudFunctionConfiguration: &s3.CloudFunctionConfiguration{
+ CloudFunction: aws.String("CloudFunction"),
+ Event: aws.String("Event"),
+ Events: []*string{
+ aws.String("Event"), // Required
+ // More values...
+ },
+ ID: aws.String("NotificationId"),
+ InvocationRole: aws.String("CloudFunctionInvocationRole"),
+ },
+ QueueConfiguration: &s3.QueueConfigurationDeprecated{
+ Event: aws.String("Event"),
+ Events: []*string{
+ aws.String("Event"), // Required
+ // More values...
+ },
+ ID: aws.String("NotificationId"),
+ Queue: aws.String("QueueArn"),
+ },
+ TopicConfiguration: &s3.TopicConfigurationDeprecated{
+ Event: aws.String("Event"),
+ Events: []*string{
+ aws.String("Event"), // Required
+ // More values...
+ },
+ ID: aws.String("NotificationId"),
+ Topic: aws.String("TopicArn"),
+ },
+ },
+ }
+ resp, err := svc.PutBucketNotification(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutBucketNotificationConfiguration() {
+ svc := s3.New(nil)
+
+ params := &s3.PutBucketNotificationConfigurationInput{
+ Bucket: aws.String("BucketName"), // Required
+ NotificationConfiguration: &s3.NotificationConfiguration{ // Required
+ LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{
+ { // Required
+ Events: []*string{ // Required
+ aws.String("Event"), // Required
+ // More values...
+ },
+ LambdaFunctionARN: aws.String("LambdaFunctionArn"), // Required
+ ID: aws.String("NotificationId"),
+ },
+ // More values...
+ },
+ QueueConfigurations: []*s3.QueueConfiguration{
+ { // Required
+ Events: []*string{ // Required
+ aws.String("Event"), // Required
+ // More values...
+ },
+ QueueARN: aws.String("QueueArn"), // Required
+ ID: aws.String("NotificationId"),
+ },
+ // More values...
+ },
+ TopicConfigurations: []*s3.TopicConfiguration{
+ { // Required
+ Events: []*string{ // Required
+ aws.String("Event"), // Required
+ // More values...
+ },
+ TopicARN: aws.String("TopicArn"), // Required
+ ID: aws.String("NotificationId"),
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketNotificationConfiguration(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutBucketPolicy() {
+ svc := s3.New(nil)
+
+ params := &s3.PutBucketPolicyInput{
+ Bucket: aws.String("BucketName"), // Required
+ Policy: aws.String("Policy"), // Required
+ }
+ resp, err := svc.PutBucketPolicy(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutBucketReplication() {
+ svc := s3.New(nil)
+
+ params := &s3.PutBucketReplicationInput{
+ Bucket: aws.String("BucketName"), // Required
+ ReplicationConfiguration: &s3.ReplicationConfiguration{ // Required
+ Role: aws.String("Role"), // Required
+ Rules: []*s3.ReplicationRule{ // Required
+ { // Required
+ Destination: &s3.Destination{ // Required
+ Bucket: aws.String("BucketName"), // Required
+ },
+ Prefix: aws.String("Prefix"), // Required
+ Status: aws.String("ReplicationRuleStatus"), // Required
+ ID: aws.String("ID"),
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketReplication(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutBucketRequestPayment() {
+ svc := s3.New(nil)
+
+ params := &s3.PutBucketRequestPaymentInput{
+ Bucket: aws.String("BucketName"), // Required
+ RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{ // Required
+ Payer: aws.String("Payer"), // Required
+ },
+ }
+ resp, err := svc.PutBucketRequestPayment(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutBucketTagging() {
+ svc := s3.New(nil)
+
+ params := &s3.PutBucketTaggingInput{
+ Bucket: aws.String("BucketName"), // Required
+ Tagging: &s3.Tagging{ // Required
+ TagSet: []*s3.Tag{ // Required
+ { // Required
+ Key: aws.String("ObjectKey"), // Required
+ Value: aws.String("Value"), // Required
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketTagging(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutBucketVersioning() {
+ svc := s3.New(nil)
+
+ params := &s3.PutBucketVersioningInput{
+ Bucket: aws.String("BucketName"), // Required
+ VersioningConfiguration: &s3.VersioningConfiguration{ // Required
+ MFADelete: aws.String("MFADelete"),
+ Status: aws.String("BucketVersioningStatus"),
+ },
+ MFA: aws.String("MFA"),
+ }
+ resp, err := svc.PutBucketVersioning(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutBucketWebsite() {
+ svc := s3.New(nil)
+
+ params := &s3.PutBucketWebsiteInput{
+ Bucket: aws.String("BucketName"), // Required
+ WebsiteConfiguration: &s3.WebsiteConfiguration{ // Required
+ ErrorDocument: &s3.ErrorDocument{
+ Key: aws.String("ObjectKey"), // Required
+ },
+ IndexDocument: &s3.IndexDocument{
+ Suffix: aws.String("Suffix"), // Required
+ },
+ RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{
+ HostName: aws.String("HostName"), // Required
+ Protocol: aws.String("Protocol"),
+ },
+ RoutingRules: []*s3.RoutingRule{
+ { // Required
+ Redirect: &s3.Redirect{ // Required
+ HTTPRedirectCode: aws.String("HttpRedirectCode"),
+ HostName: aws.String("HostName"),
+ Protocol: aws.String("Protocol"),
+ ReplaceKeyPrefixWith: aws.String("ReplaceKeyPrefixWith"),
+ ReplaceKeyWith: aws.String("ReplaceKeyWith"),
+ },
+ Condition: &s3.Condition{
+ HTTPErrorCodeReturnedEquals: aws.String("HttpErrorCodeReturnedEquals"),
+ KeyPrefixEquals: aws.String("KeyPrefixEquals"),
+ },
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketWebsite(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutObject() {
+ svc := s3.New(nil)
+
+ params := &s3.PutObjectInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ ACL: aws.String("ObjectCannedACL"),
+ Body: bytes.NewReader([]byte("PAYLOAD")),
+ CacheControl: aws.String("CacheControl"),
+ ContentDisposition: aws.String("ContentDisposition"),
+ ContentEncoding: aws.String("ContentEncoding"),
+ ContentLanguage: aws.String("ContentLanguage"),
+ ContentLength: aws.Long(1),
+ ContentType: aws.String("ContentType"),
+ Expires: aws.Time(time.Now()),
+ GrantFullControl: aws.String("GrantFullControl"),
+ GrantRead: aws.String("GrantRead"),
+ GrantReadACP: aws.String("GrantReadACP"),
+ GrantWriteACP: aws.String("GrantWriteACP"),
+ Metadata: map[string]*string{
+ "Key": aws.String("MetadataValue"), // Required
+ // More values...
+ },
+ RequestPayer: aws.String("RequestPayer"),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ SSEKMSKeyID: aws.String("SSEKMSKeyId"),
+ ServerSideEncryption: aws.String("ServerSideEncryption"),
+ StorageClass: aws.String("StorageClass"),
+ WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"),
+ }
+ resp, err := svc.PutObject(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_PutObjectACL() {
+ svc := s3.New(nil)
+
+ params := &s3.PutObjectACLInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ ACL: aws.String("ObjectCannedACL"),
+ AccessControlPolicy: &s3.AccessControlPolicy{
+ Grants: []*s3.Grant{
+ { // Required
+ Grantee: &s3.Grantee{
+ Type: aws.String("Type"), // Required
+ DisplayName: aws.String("DisplayName"),
+ EmailAddress: aws.String("EmailAddress"),
+ ID: aws.String("ID"),
+ URI: aws.String("URI"),
+ },
+ Permission: aws.String("Permission"),
+ },
+ // More values...
+ },
+ Owner: &s3.Owner{
+ DisplayName: aws.String("DisplayName"),
+ ID: aws.String("ID"),
+ },
+ },
+ GrantFullControl: aws.String("GrantFullControl"),
+ GrantRead: aws.String("GrantRead"),
+ GrantReadACP: aws.String("GrantReadACP"),
+ GrantWrite: aws.String("GrantWrite"),
+ GrantWriteACP: aws.String("GrantWriteACP"),
+ RequestPayer: aws.String("RequestPayer"),
+ }
+ resp, err := svc.PutObjectACL(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_RestoreObject() {
+ svc := s3.New(nil)
+
+ params := &s3.RestoreObjectInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ RequestPayer: aws.String("RequestPayer"),
+ RestoreRequest: &s3.RestoreRequest{
+ Days: aws.Long(1), // Required
+ },
+ VersionID: aws.String("ObjectVersionId"),
+ }
+ resp, err := svc.RestoreObject(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_UploadPart() {
+ svc := s3.New(nil)
+
+ params := &s3.UploadPartInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ PartNumber: aws.Long(1), // Required
+ UploadID: aws.String("MultipartUploadId"), // Required
+ Body: bytes.NewReader([]byte("PAYLOAD")),
+ ContentLength: aws.Long(1),
+ RequestPayer: aws.String("RequestPayer"),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ }
+ resp, err := svc.UploadPart(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
+
+func ExampleS3_UploadPartCopy() {
+ svc := s3.New(nil)
+
+ params := &s3.UploadPartCopyInput{
+ Bucket: aws.String("BucketName"), // Required
+ CopySource: aws.String("CopySource"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ PartNumber: aws.Long(1), // Required
+ UploadID: aws.String("MultipartUploadId"), // Required
+ CopySourceIfMatch: aws.String("CopySourceIfMatch"),
+ CopySourceIfModifiedSince: aws.Time(time.Now()),
+ CopySourceIfNoneMatch: aws.String("CopySourceIfNoneMatch"),
+ CopySourceIfUnmodifiedSince: aws.Time(time.Now()),
+ CopySourceRange: aws.String("CopySourceRange"),
+ CopySourceSSECustomerAlgorithm: aws.String("CopySourceSSECustomerAlgorithm"),
+ CopySourceSSECustomerKey: aws.String("CopySourceSSECustomerKey"),
+ CopySourceSSECustomerKeyMD5: aws.String("CopySourceSSECustomerKeyMD5"),
+ RequestPayer: aws.String("RequestPayer"),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ }
+ resp, err := svc.UploadPartCopy(params)
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // Generic AWS Error with Code, Message, and original error (if any)
+ fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
+ }
+ } else {
+ // This case should never be hit, the SDK should always return an
+ // error which satisfies the awserr.Error interface.
+ fmt.Println(err.Error())
+ }
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(awsutil.StringValue(resp))
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 0000000..07c62dc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,53 @@
+package s3
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+ return reDomain.MatchString(bucket) &&
+ !reIPAddress.MatchString(bucket) &&
+ !strings.Contains(bucket, "..")
+}
+
+// hostStyleBucketName returns true if the request should put the bucket in
+// the host. This is false if S3ForcePathStyle is explicitly set or if the
+// bucket is not DNS compatible.
+func hostStyleBucketName(r *aws.Request, bucket string) bool {
+ if r.Config.S3ForcePathStyle {
+ return false
+ }
+
+ // Bucket might be DNS compatible but dots in the hostname will fail
+ // certificate validation, so do not use host-style.
+ if r.HTTPRequest.URL.Scheme == "https" && strings.Contains(bucket, ".") {
+ return false
+ }
+
+ // Use host-style if the bucket is DNS compatible
+ return dnsCompatibleBucketName(bucket)
+}
+
+func updateHostWithBucket(r *aws.Request) {
+ b := awsutil.ValuesAtPath(r.Params, "Bucket")
+ if len(b) == 0 {
+ return
+ }
+
+ if bucket := b[0].(string); bucket != "" && hostStyleBucketName(r, bucket) {
+ r.HTTPRequest.URL.Host = bucket + "." + r.HTTPRequest.URL.Host
+ r.HTTPRequest.URL.Path = strings.Replace(r.HTTPRequest.URL.Path, "/{Bucket}", "", -1)
+ if r.HTTPRequest.URL.Path == "" {
+ r.HTTPRequest.URL.Path = "/"
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go
new file mode 100644
index 0000000..23cdf7f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go
@@ -0,0 +1,61 @@
+package s3_test
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/internal/test/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/stretchr/testify/assert"
+)
+
+type s3BucketTest struct {
+ bucket string
+ url string
+}
+
+var (
+ _ = unit.Imported
+
+ sslTests = []s3BucketTest{
+ {"abc", "https://abc.s3.mock-region.amazonaws.com/"},
+ {"a$b$c", "https://s3.mock-region.amazonaws.com/a%24b%24c"},
+ {"a.b.c", "https://s3.mock-region.amazonaws.com/a.b.c"},
+ {"a..bc", "https://s3.mock-region.amazonaws.com/a..bc"},
+ }
+
+ nosslTests = []s3BucketTest{
+ {"a.b.c", "http://a.b.c.s3.mock-region.amazonaws.com/"},
+ {"a..bc", "http://s3.mock-region.amazonaws.com/a..bc"},
+ }
+
+ forcepathTests = []s3BucketTest{
+ {"abc", "https://s3.mock-region.amazonaws.com/abc"},
+ {"a$b$c", "https://s3.mock-region.amazonaws.com/a%24b%24c"},
+ {"a.b.c", "https://s3.mock-region.amazonaws.com/a.b.c"},
+ {"a..bc", "https://s3.mock-region.amazonaws.com/a..bc"},
+ }
+)
+
+func runTests(t *testing.T, svc *s3.S3, tests []s3BucketTest) {
+ for _, test := range tests {
+ req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{Bucket: &test.bucket})
+ req.Build()
+ assert.Equal(t, test.url, req.HTTPRequest.URL.String())
+ }
+}
+
+func TestHostStyleBucketBuild(t *testing.T) {
+ s := s3.New(nil)
+ runTests(t, s, sslTests)
+}
+
+func TestHostStyleBucketBuildNoSSL(t *testing.T) {
+ s := s3.New(&aws.Config{DisableSSL: true})
+ runTests(t, s, nosslTests)
+}
+
+func TestPathStyleBucketBuild(t *testing.T) {
+ s := s3.New(&aws.Config{S3ForcePathStyle: true})
+ runTests(t, s, forcepathTests)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
new file mode 100644
index 0000000..d799e25
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
@@ -0,0 +1,119 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package s3iface provides an interface for the Amazon Simple Storage Service.
+package s3iface
+
+import (
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+// S3API is the interface type for s3.S3.
+type S3API interface {
+ AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)
+
+ CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
+
+ CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
+
+ CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error)
+
+ CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
+
+ DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
+
+ DeleteBucketCORS(*s3.DeleteBucketCORSInput) (*s3.DeleteBucketCORSOutput, error)
+
+ DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error)
+
+ DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error)
+
+ DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error)
+
+ DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error)
+
+ DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error)
+
+ DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
+
+ DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
+
+ GetBucketACL(*s3.GetBucketACLInput) (*s3.GetBucketACLOutput, error)
+
+ GetBucketCORS(*s3.GetBucketCORSInput) (*s3.GetBucketCORSOutput, error)
+
+ GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error)
+
+ GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error)
+
+ GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error)
+
+ GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error)
+
+ GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error)
+
+ GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error)
+
+ GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error)
+
+ GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error)
+
+ GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error)
+
+ GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error)
+
+ GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error)
+
+ GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error)
+
+ GetObjectACL(*s3.GetObjectACLInput) (*s3.GetObjectACLOutput, error)
+
+ GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error)
+
+ HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
+
+ HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
+
+ ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error)
+
+ ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
+
+ ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error)
+
+ ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
+
+ ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error)
+
+ PutBucketACL(*s3.PutBucketACLInput) (*s3.PutBucketACLOutput, error)
+
+ PutBucketCORS(*s3.PutBucketCORSInput) (*s3.PutBucketCORSOutput, error)
+
+ PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error)
+
+ PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error)
+
+ PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error)
+
+ PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error)
+
+ PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error)
+
+ PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error)
+
+ PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error)
+
+ PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error)
+
+ PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error)
+
+ PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error)
+
+ PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error)
+
+ PutObjectACL(*s3.PutObjectACLInput) (*s3.PutObjectACLOutput, error)
+
+ RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error)
+
+ UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error)
+
+ UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface_test.go
new file mode 100644
index 0000000..cd67215
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface_test.go
@@ -0,0 +1,15 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package s3iface_test
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestInterface(t *testing.T) {
+ assert.Implements(t, (*s3iface.S3API)(nil), s3.New(nil))
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
new file mode 100644
index 0000000..d6ebe02
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
@@ -0,0 +1,257 @@
+package s3manager
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+// The default range of bytes to get at a time when using Download().
+var DefaultDownloadPartSize int64 = 1024 * 1024 * 5
+
+// The default number of goroutines to spin up when using Download().
+var DefaultDownloadConcurrency = 5
+
+// The default set of options used when opts is nil in Download().
+var DefaultDownloadOptions = &DownloadOptions{
+ PartSize: DefaultDownloadPartSize,
+ Concurrency: DefaultDownloadConcurrency,
+}
+
+// DownloadOptions keeps track of extra options to pass to a Download() call.
+type DownloadOptions struct {
+ // The size (in bytes) of each ranged GET chunk requested when downloading
+ // an object from S3. If this value is set to zero, the
+ // DefaultDownloadPartSize value will be used.
+ PartSize int64
+
+ // The number of goroutines to spin up in parallel when sending parts.
+ // If this is set to zero, the DefaultConcurrency value will be used.
+ Concurrency int
+
+ // An S3 client to use when performing downloads. Leave this as nil to use
+ // a default client.
+ S3 *s3.S3
+}
+
+// NewDownloader creates a new Downloader structure that downloads an object
+// from S3 in concurrent chunks. Pass in an optional DownloadOptions struct
+// to customize the downloader behavior.
+func NewDownloader(opts *DownloadOptions) *Downloader {
+ if opts == nil {
+ opts = DefaultDownloadOptions
+ }
+ return &Downloader{opts: opts}
+}
+
+// The Downloader structure that calls Download(). It is safe to call Download()
+// on this structure for multiple objects and across concurrent goroutines.
+type Downloader struct {
+ opts *DownloadOptions
+}
+
+// Download downloads an object in S3 and writes the payload into w using
+// concurrent GET requests.
+//
+// It is safe to call this method for multiple objects and across concurrent
+// goroutines.
+func (d *Downloader) Download(w io.WriterAt, input *s3.GetObjectInput) (n int64, err error) {
+ impl := downloader{w: w, in: input, opts: *d.opts}
+ return impl.download()
+}
+
+// downloader is the implementation structure used internally by Downloader.
+type downloader struct {
+ opts DownloadOptions
+ in *s3.GetObjectInput
+ w io.WriterAt
+
+ wg sync.WaitGroup
+ m sync.Mutex
+
+ pos int64
+ totalBytes int64
+ written int64
+ err error
+}
+
+// init initializes the downloader with default options.
+func (d *downloader) init() {
+ d.totalBytes = -1
+
+ if d.opts.Concurrency == 0 {
+ d.opts.Concurrency = DefaultDownloadConcurrency
+ }
+
+ if d.opts.PartSize == 0 {
+ d.opts.PartSize = DefaultDownloadPartSize
+ }
+
+ if d.opts.S3 == nil {
+ d.opts.S3 = s3.New(nil)
+ }
+}
+
+// download performs the implementation of the object download across ranged
+// GETs.
+func (d *downloader) download() (n int64, err error) {
+ d.init()
+
+ // Spin up workers
+ ch := make(chan dlchunk, d.opts.Concurrency)
+ for i := 0; i < d.opts.Concurrency; i++ {
+ d.wg.Add(1)
+ go d.downloadPart(ch)
+ }
+
+ // Assign work
+ for d.geterr() == nil {
+ if d.pos != 0 {
+ // This is not the first chunk, let's wait until we know the total
+ // size of the payload so we can see if we have read the entire
+ // object.
+ total := d.getTotalBytes()
+
+ if total < 0 {
+ // Total has not yet been set, so sleep and loop around while
+ // waiting for our first worker to resolve this value.
+ time.Sleep(10 * time.Millisecond)
+ continue
+ } else if d.pos >= total {
+ break // We're finished queueing chunks
+ }
+ }
+
+ // Queue the next range of bytes to read.
+ ch <- dlchunk{w: d.w, start: d.pos, size: d.opts.PartSize}
+ d.pos += d.opts.PartSize
+ }
+
+ // Wait for completion
+ close(ch)
+ d.wg.Wait()
+
+ // Return error
+ return d.written, d.err
+}
+
+// downloadPart is an individual goroutine worker reading from the ch channel
+// and performing a GetObject request on the data with a given byte range.
+//
+// If this is the first worker, this operation also resolves the total number
+// of bytes to be read so that the worker manager knows when it is finished.
+func (d *downloader) downloadPart(ch chan dlchunk) {
+ defer d.wg.Done()
+
+ for {
+ chunk, ok := <-ch
+
+ if !ok {
+ break
+ }
+
+ if d.geterr() == nil {
+ // Get the next byte range of data
+ in := &s3.GetObjectInput{}
+ awsutil.Copy(in, d.in)
+ rng := fmt.Sprintf("bytes=%d-%d",
+ chunk.start, chunk.start+chunk.size-1)
+ in.Range = &rng
+
+ resp, err := d.opts.S3.GetObject(in)
+ if err != nil {
+ d.seterr(err)
+ } else {
+ d.setTotalBytes(resp) // Set total if not yet set.
+
+ n, err := io.Copy(&chunk, resp.Body)
+ resp.Body.Close()
+
+ if err != nil {
+ d.seterr(err)
+ }
+ d.incrwritten(n)
+ }
+ }
+ }
+}
+
+// getTotalBytes is a thread-safe getter for retrieving the total byte status.
+func (d *downloader) getTotalBytes() int64 {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ return d.totalBytes
+}
+
+// setTotalBytes is a thread-safe setter for setting the total byte status.
+func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ if d.totalBytes >= 0 {
+ return
+ }
+
+ parts := strings.Split(*resp.ContentRange, "/")
+ total, err := strconv.ParseInt(parts[len(parts)-1], 10, 64)
+ if err != nil {
+ d.err = err
+ return
+ }
+
+ d.totalBytes = total
+}
+
+func (d *downloader) incrwritten(n int64) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ d.written += n
+}
+
+// geterr is a thread-safe getter for the error object
+func (d *downloader) geterr() error {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ return d.err
+}
+
+// seterr is a thread-safe setter for the error object
+func (d *downloader) seterr(e error) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ d.err = e
+}
+
+// dlchunk represents a single chunk of data to write by the worker routine.
+// This structure also implements an io.SectionReader style interface for
+// io.WriterAt, effectively making it an io.SectionWriter (which does not
+// exist).
+type dlchunk struct {
+ w io.WriterAt
+ start int64
+ size int64
+ cur int64
+}
+
+// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start
+// position to its end (or EOF).
+func (c *dlchunk) Write(p []byte) (n int, err error) {
+ if c.cur >= c.size {
+ return 0, io.EOF
+ }
+
+ n, err = c.w.WriteAt(p, c.start+c.cur)
+ c.cur += int64(n)
+
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go
new file mode 100644
index 0000000..2c9c3b4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go
@@ -0,0 +1,165 @@
+package s3manager_test
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "regexp"
+ "strconv"
+ "sync"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/internal/test/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ = unit.Imported
+
+func dlLoggingSvc(data []byte) (*s3.S3, *[]string, *[]string) {
+ var m sync.Mutex
+ names := []string{}
+ ranges := []string{}
+
+ svc := s3.New(nil)
+ svc.Handlers.Send.Clear()
+ svc.Handlers.Send.PushBack(func(r *aws.Request) {
+ m.Lock()
+ defer m.Unlock()
+
+ names = append(names, r.Operation.Name)
+ ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range)
+
+ rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`)
+ rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range"))
+ start, _ := strconv.ParseInt(rng[1], 10, 64)
+ fin, _ := strconv.ParseInt(rng[2], 10, 64)
+ fin++
+
+ if fin > int64(len(data)) {
+ fin = int64(len(data))
+ }
+
+ r.HTTPResponse = &http.Response{
+ StatusCode: 200,
+ Body: ioutil.NopCloser(bytes.NewReader(data[start:fin])),
+ Header: http.Header{},
+ }
+ r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d",
+ start, fin, len(data)))
+ })
+
+ return svc, &names, &ranges
+}
+
+type dlwriter struct {
+ buf []byte
+}
+
+func newDLWriter(size int) *dlwriter {
+ return &dlwriter{buf: make([]byte, size)}
+}
+
+func (d dlwriter) WriteAt(p []byte, pos int64) (n int, err error) {
+ if pos > int64(len(d.buf)) {
+ return 0, io.EOF
+ }
+
+ written := 0
+ for i, b := range p {
+ if i >= len(d.buf) {
+ break
+ }
+ d.buf[pos+int64(i)] = b
+ written++
+ }
+ return written, nil
+}
+
+func TestDownloadOrder(t *testing.T) {
+ s, names, ranges := dlLoggingSvc(buf12MB)
+
+ opts := &s3manager.DownloadOptions{S3: s, Concurrency: 1}
+ d := s3manager.NewDownloader(opts)
+ w := newDLWriter(len(buf12MB))
+ n, err := d.Download(w, &s3.GetObjectInput{
+ Bucket: aws.String("bucket"),
+ Key: aws.String("key"),
+ })
+
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(buf12MB)), n)
+ assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names)
+ assert.Equal(t, []string{"bytes=0-5242879", "bytes=5242880-10485759", "bytes=10485760-15728639"}, *ranges)
+
+ count := 0
+ for _, b := range w.buf {
+ count += int(b)
+ }
+ assert.Equal(t, 0, count)
+}
+
+func TestDownloadZero(t *testing.T) {
+ s, names, ranges := dlLoggingSvc([]byte{})
+
+ opts := &s3manager.DownloadOptions{S3: s}
+ d := s3manager.NewDownloader(opts)
+ w := newDLWriter(0)
+ n, err := d.Download(w, &s3.GetObjectInput{
+ Bucket: aws.String("bucket"),
+ Key: aws.String("key"),
+ })
+
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ assert.Equal(t, []string{"GetObject"}, *names)
+ assert.Equal(t, []string{"bytes=0-5242879"}, *ranges)
+}
+
+func TestDownloadSetPartSize(t *testing.T) {
+ s, names, ranges := dlLoggingSvc([]byte{1, 2, 3})
+
+ opts := &s3manager.DownloadOptions{S3: s, PartSize: 1, Concurrency: 1}
+ d := s3manager.NewDownloader(opts)
+ w := newDLWriter(3)
+ n, err := d.Download(w, &s3.GetObjectInput{
+ Bucket: aws.String("bucket"),
+ Key: aws.String("key"),
+ })
+
+ assert.Nil(t, err)
+ assert.Equal(t, int64(3), n)
+ assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names)
+ assert.Equal(t, []string{"bytes=0-0", "bytes=1-1", "bytes=2-2"}, *ranges)
+ assert.Equal(t, []byte{1, 2, 3}, w.buf)
+}
+
+func TestDownloadError(t *testing.T) {
+ s, names, _ := dlLoggingSvc([]byte{1, 2, 3})
+ opts := &s3manager.DownloadOptions{S3: s, PartSize: 1, Concurrency: 1}
+
+ num := 0
+ s.Handlers.Send.PushBack(func(r *aws.Request) {
+ num++
+ if num > 1 {
+ r.HTTPResponse.StatusCode = 400
+ r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
+ }
+ })
+
+ d := s3manager.NewDownloader(opts)
+ w := newDLWriter(3)
+ n, err := d.Download(w, &s3.GetObjectInput{
+ Bucket: aws.String("bucket"),
+ Key: aws.String("key"),
+ })
+
+ assert.NotNil(t, err)
+ assert.Equal(t, int64(1), n)
+ assert.Equal(t, []string{"GetObject", "GetObject"}, *names)
+ assert.Equal(t, []byte{1, 0, 0}, w.buf)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
new file mode 100644
index 0000000..db55680
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
@@ -0,0 +1,562 @@
+package s3manager
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+// The maximum allowed number of parts in a multi-part upload on Amazon S3.
+var MaxUploadParts = 10000
+
+// The minimum allowed part size when uploading a part to Amazon S3.
+var MinUploadPartSize int64 = 1024 * 1024 * 5
+
+// The default part size to buffer chunks of a payload into.
+var DefaultUploadPartSize = MinUploadPartSize
+
+// The default number of goroutines to spin up when using Upload().
+var DefaultUploadConcurrency = 5
+
+// The default set of options used when opts is nil in Upload().
+var DefaultUploadOptions = &UploadOptions{
+ PartSize: DefaultUploadPartSize,
+ Concurrency: DefaultUploadConcurrency,
+ LeavePartsOnError: false,
+ S3: nil,
+}
+
+// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
+// will satisfy this interface when a multi part upload failed to upload all
+// chunks to S3. In the case of a failure the UploadID is needed to operate on
+// the chunks, if any, which were uploaded.
+//
+// Example:
+//
+// u := s3manager.NewUploader(opts)
+// output, err := u.upload(input)
+// if err != nil {
+// if multierr, ok := err.(MultiUploadFailure); ok {
+// // Process error and its associated uploadID
+// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
+// } else {
+// // Process error generically
+// fmt.Println("Error:", err.Error())
+// }
+// }
+//
+type MultiUploadFailure interface {
+ awserr.Error
+
+ // Returns the upload id for the S3 multipart upload that failed.
+ UploadID() string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the multiUploadError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
+// Composed of BaseError for code, message, and original error
+//
+// Should be used for an error that occurred failing a S3 multipart upload,
+// and an upload ID is available. If an upload ID is not available, use a more
+type multiUploadError struct {
+ awsError
+
+ // ID for multipart upload which failed.
+ uploadID string
+}
+
+// Error returns the string representation of the error.
+//
+// See apierr.BaseError ErrorWithExtra for output format
+//
+// Satisfies the error interface.
+func (m multiUploadError) Error() string {
+ extra := fmt.Sprintf("upload id: %s", m.uploadID)
+ return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (m multiUploadError) String() string {
+ return m.Error()
+}
+
+// UploadID returns the id of the S3 upload which failed.
+func (m multiUploadError) UploadID() string {
+ return m.uploadID
+}
+
+// UploadInput contains all input for upload requests to Amazon S3.
+type UploadInput struct {
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+ // Specifies the algorithm to use to when encrypting the object (e.g., AES256,
+ // aws:kms).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+
+ // The readable body payload to send to S3.
+ Body io.Reader
+}
+
+// UploadOutput represents a response from the Upload() call.
+type UploadOutput struct {
+ // The URL where the object was uploaded to.
+ Location string
+
+ // The ID for a multipart upload to S3. In the case of an error the error
+ // can be cast to the MultiUploadFailure interface to extract the upload ID.
+ UploadID string
+}
+
+// UploadOptions keeps track of extra options to pass to an Upload() call.
+type UploadOptions struct {
+ // The buffer size (in bytes) to use when buffering data into chunks and
+ // sending them as parts to S3. The minimum allowed part size is 5MB, and
+ // if this value is set to zero, the DefaultPartSize value will be used.
+ PartSize int64
+
+ // The number of goroutines to spin up in parallel when sending parts.
+ // If this is set to zero, the DefaultConcurrency value will be used.
+ Concurrency int
+
+ // Setting this value to true will cause the SDK to avoid calling
+ // AbortMultipartUpload on a failure, leaving all successfully uploaded
+ // parts on S3 for manual recovery.
+ //
+ // Note that storing parts of an incomplete multipart upload counts towards
+ // space usage on S3 and will add additional costs if not cleaned up.
+ LeavePartsOnError bool
+
+ // The client to use when uploading to S3. Leave this as nil to use the
+ // default S3 client.
+ S3 *s3.S3
+}
+
+// NewUploader creates a new Uploader object to upload data to S3. Pass in
+// an optional opts structure to customize the uploader behavior.
+func NewUploader(opts *UploadOptions) *Uploader {
+ if opts == nil {
+ opts = DefaultUploadOptions
+ }
+ return &Uploader{opts: opts}
+}
+
+// The Uploader structure that calls Upload(). It is safe to call Upload()
+// on this structure for multiple objects and across concurrent goroutines.
+type Uploader struct {
+ opts *UploadOptions
+}
+
+// Upload uploads an object to S3, intelligently buffering large files into
+// smaller chunks and sending them in parallel across multiple goroutines. You
+// can configure the buffer size and concurrency through the opts parameter.
+//
+// If opts is set to nil, DefaultUploadOptions will be used.
+//
+// It is safe to call this method for multiple objects and across concurrent
+// goroutines.
+func (u *Uploader) Upload(input *UploadInput) (*UploadOutput, error) {
+ i := uploader{in: input, opts: *u.opts}
+ return i.upload()
+}
+
+// internal structure to manage an upload to S3.
+type uploader struct {
+ in *UploadInput
+ opts UploadOptions
+
+ readerPos int64 // current reader position
+ totalSize int64 // set to -1 if the size is not known
+}
+
+// internal logic for deciding whether to upload a single part or use a
+// multipart upload.
+func (u *uploader) upload() (*UploadOutput, error) {
+ u.init()
+
+ if u.opts.PartSize < MinUploadPartSize {
+ msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
+ return nil, awserr.New("ConfigError", msg, nil)
+ }
+
+ // Do one read to determine if we have more than one part
+ buf, err := u.nextReader()
+ if err == io.EOF || err == io.ErrUnexpectedEOF { // single part
+ return u.singlePart(buf)
+ } else if err != nil {
+ return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
+ }
+
+ mu := multiuploader{uploader: u}
+ return mu.upload(buf)
+}
+
+// init will initialize all default options.
+func (u *uploader) init() {
+ if u.opts.S3 == nil {
+ u.opts.S3 = s3.New(nil)
+ }
+ if u.opts.Concurrency == 0 {
+ u.opts.Concurrency = DefaultUploadConcurrency
+ }
+ if u.opts.PartSize == 0 {
+ u.opts.PartSize = DefaultUploadPartSize
+ }
+
+ // Try to get the total size for some optimizations
+ u.initSize()
+}
+
+// initSize tries to detect the total stream size, setting u.totalSize. If
+// the size is not known, totalSize is set to -1.
+func (u *uploader) initSize() {
+ u.totalSize = -1
+
+ switch r := u.in.Body.(type) {
+ case io.Seeker:
+ pos, _ := r.Seek(0, 1)
+ defer r.Seek(pos, 0)
+
+ n, err := r.Seek(0, 2)
+ if err != nil {
+ return
+ }
+ u.totalSize = n
+
+ // try to adjust partSize if it is too small
+ if u.totalSize/u.opts.PartSize >= int64(MaxUploadParts) {
+ u.opts.PartSize = u.totalSize / int64(MaxUploadParts)
+ }
+ }
+}
+
+// nextReader returns a seekable reader representing the next packet of data.
+// This operation increases the shared u.readerPos counter, but note that it
+// does not need to be wrapped in a mutex because nextReader is only called
+// from the main thread.
+func (u *uploader) nextReader() (io.ReadSeeker, error) {
+ switch r := u.in.Body.(type) {
+ case io.ReaderAt:
+ var err error
+
+ n := u.opts.PartSize
+ if u.totalSize >= 0 {
+ bytesLeft := u.totalSize - u.readerPos
+
+ if bytesLeft == 0 {
+ err = io.EOF
+ } else if bytesLeft <= u.opts.PartSize {
+ err = io.ErrUnexpectedEOF
+ n = bytesLeft
+ }
+ }
+
+ buf := io.NewSectionReader(r, u.readerPos, n)
+ u.readerPos += n
+
+ return buf, err
+
+ default:
+ packet := make([]byte, u.opts.PartSize)
+ n, err := io.ReadFull(u.in.Body, packet)
+ u.readerPos += int64(n)
+
+ return bytes.NewReader(packet[0:n]), err
+ }
+}
+
+// singlePart contains upload logic for uploading a single chunk via
+// a regular PutObject request. Multipart requests require at least two
+// parts, or at least 5MB of data.
+func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
+ params := &s3.PutObjectInput{}
+ awsutil.Copy(params, u.in)
+ params.Body = buf
+
+ req, _ := u.opts.S3.PutObjectRequest(params)
+ if err := req.Send(); err != nil {
+ return nil, err
+ }
+
+ url := req.HTTPRequest.URL.String()
+ return &UploadOutput{Location: url}, nil
+}
+
+// internal structure to manage a specific multipart upload to S3.
+type multiuploader struct {
+ *uploader
+ wg sync.WaitGroup
+ m sync.Mutex
+ err error
+ uploadID string
+ parts completedParts
+}
+
+// keeps track of a single chunk of data being sent to S3.
+type chunk struct {
+ buf io.ReadSeeker
+ num int64
+}
+
+// completedParts is a wrapper to make parts sortable by their part number,
+// since S3 requires this list to be sent in sorted order.
+type completedParts []*s3.CompletedPart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
+
+// upload will perform a multipart upload using the firstBuf buffer containing
+// the first chunk of data.
+func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
+ params := &s3.CreateMultipartUploadInput{}
+ awsutil.Copy(params, u.in)
+
+ // Create the multipart
+ resp, err := u.opts.S3.CreateMultipartUpload(params)
+ if err != nil {
+ return nil, err
+ }
+ u.uploadID = *resp.UploadID
+
+ // Create the workers
+ ch := make(chan chunk, u.opts.Concurrency)
+ for i := 0; i < u.opts.Concurrency; i++ {
+ u.wg.Add(1)
+ go u.readChunk(ch)
+ }
+
+ // Send part 1 to the workers
+ var num int64 = 1
+ ch <- chunk{buf: firstBuf, num: num}
+
+ // Read and queue the rest of the parts
+ for u.geterr() == nil {
+ // This upload exceeded maximum number of supported parts, error now.
+ if num > int64(MaxUploadParts) {
+ msg := fmt.Sprintf("exceeded total allowed parts (%d). "+
+ "Adjust PartSize to fit in this limit", MaxUploadParts)
+ u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
+ break
+ }
+
+ num++
+
+ buf, err := u.nextReader()
+ if err == io.EOF {
+ break
+ }
+
+ ch <- chunk{buf: buf, num: num}
+
+ if err != nil && err != io.ErrUnexpectedEOF {
+ u.seterr(awserr.New(
+ "ReadRequestBody",
+ "read multipart upload data failed",
+ err))
+ break
+ }
+ }
+
+ // Close the channel, wait for workers, and complete upload
+ close(ch)
+ u.wg.Wait()
+ complete := u.complete()
+
+ if err := u.geterr(); err != nil {
+ return nil, &multiUploadError{
+ awsError: awserr.New(
+ "MultipartUpload",
+ "upload multipart failed",
+ err),
+ uploadID: u.uploadID,
+ }
+ }
+ return &UploadOutput{
+ Location: *complete.Location,
+ UploadID: u.uploadID,
+ }, nil
+}
+
+// readChunk runs in worker goroutines to pull chunks off of the ch channel
+// and send() them as UploadPart requests.
+func (u *multiuploader) readChunk(ch chan chunk) {
+ defer u.wg.Done()
+ for {
+ data, ok := <-ch
+
+ if !ok {
+ break
+ }
+
+ if u.geterr() == nil {
+ if err := u.send(data); err != nil {
+ u.seterr(err)
+ }
+ }
+ }
+}
+
+// send performs an UploadPart request and keeps track of the completed
+// part information.
+func (u *multiuploader) send(c chunk) error {
+ resp, err := u.opts.S3.UploadPart(&s3.UploadPartInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ Body: c.buf,
+ UploadID: &u.uploadID,
+ PartNumber: &c.num,
+ })
+
+ if err != nil {
+ return err
+ }
+
+ n := c.num
+ completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
+
+ u.m.Lock()
+ u.parts = append(u.parts, completed)
+ u.m.Unlock()
+
+ return nil
+}
+
+// geterr is a thread-safe getter for the error object
+func (u *multiuploader) geterr() error {
+ u.m.Lock()
+ defer u.m.Unlock()
+
+ return u.err
+}
+
+// seterr is a thread-safe setter for the error object
+func (u *multiuploader) seterr(e error) {
+ u.m.Lock()
+ defer u.m.Unlock()
+
+ u.err = e
+}
+
+// fail will abort the multipart unless LeavePartsOnError is set to true.
+func (u *multiuploader) fail() {
+ if u.opts.LeavePartsOnError {
+ return
+ }
+
+ u.opts.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ UploadID: &u.uploadID,
+ })
+}
+
+// complete successfully completes a multipart upload and returns the response.
+func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
+ if u.geterr() != nil {
+ u.fail()
+ return nil
+ }
+
+ // Parts must be sorted in PartNumber order.
+ sort.Sort(u.parts)
+
+ resp, err := u.opts.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ UploadID: &u.uploadID,
+ MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
+ })
+ if err != nil {
+ u.seterr(err)
+ u.fail()
+ }
+
+ return resp
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go
new file mode 100644
index 0000000..9016419
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go
@@ -0,0 +1,438 @@
+package s3manager_test
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "sort"
+ "sync"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/internal/test/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ = unit.Imported
+var buf12MB = make([]byte, 1024*1024*12)
+var buf2MB = make([]byte, 1024*1024*2)
+
+var emptyList = []string{}
+
+func val(i interface{}, s string) interface{} { // test helper: first value matching an awsutil path expression
+ return awsutil.ValuesAtPath(i, s)[0] // panics if the path matches nothing (acceptable in tests)
+}
+
+func contains(src []string, s string) bool { // linear membership test over a small string slice
+ for _, v := range src {
+ if s == v {
+ return true
+ }
+ }
+ return false
+}
+
+func loggingSvc(ignoreOps []string) (*s3.S3, *[]string, *[]interface{}) { // test double: records operation names/params and fabricates 200 responses
+ var m sync.Mutex
+ partNum := 0
+ names := []string{}
+ params := []interface{}{}
+ svc := s3.New(nil)
+ svc.Handlers.Unmarshal.Clear() // strip the real response pipeline; the Send handler below fabricates results
+ svc.Handlers.UnmarshalMeta.Clear()
+ svc.Handlers.UnmarshalError.Clear()
+ svc.Handlers.Send.Clear()
+ svc.Handlers.Send.PushBack(func(r *aws.Request) {
+ m.Lock() // requests may arrive concurrently when the uploader's Concurrency > 1
+ defer m.Unlock()
+
+ if !contains(ignoreOps, r.Operation.Name) {
+ names = append(names, r.Operation.Name)
+ params = append(params, r.Params)
+ }
+
+ r.HTTPResponse = &http.Response{ // fabricate an empty 200 so the request never hits the network
+ StatusCode: 200,
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+
+ switch data := r.Data.(type) { // seed the output fields the upload manager reads back
+ case *s3.CreateMultipartUploadOutput:
+ data.UploadID = aws.String("UPLOAD-ID")
+ case *s3.UploadPartOutput:
+ partNum++
+ data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum))
+ case *s3.CompleteMultipartUploadOutput:
+ data.Location = aws.String("https://location")
+ }
+ })
+
+ return svc, &names, ¶ms // NOTE(review): "¶ms" is mojibake for "&params" (encoding artifact in this patch capture)
+}
+
+func buflen(i interface{}) int { // fully drains the reader and reports how many bytes it held
+ r := i.(io.Reader)
+ b, _ := ioutil.ReadAll(r)
+ return len(b)
+}
+
+func TestUploadOrderMulti(t *testing.T) {
+ s, ops, args := loggingSvc(emptyList)
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s})
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf12MB),
+ ServerSideEncryption: aws.String("AES256"),
+ ContentType: aws.String("content/type"),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)
+ assert.Equal(t, "https://location", resp.Location)
+ assert.Equal(t, "UPLOAD-ID", resp.UploadID)
+
+ // Validate input values
+
+ // UploadPart
+ assert.Equal(t, "UPLOAD-ID", val((*args)[1], "UploadID"))
+ assert.Equal(t, "UPLOAD-ID", val((*args)[2], "UploadID"))
+ assert.Equal(t, "UPLOAD-ID", val((*args)[3], "UploadID"))
+
+ // CompleteMultipartUpload
+ assert.Equal(t, "UPLOAD-ID", val((*args)[4], "UploadID"))
+ assert.Equal(t, int64(1), val((*args)[4], "MultipartUpload.Parts[0].PartNumber"))
+ assert.Equal(t, int64(2), val((*args)[4], "MultipartUpload.Parts[1].PartNumber"))
+ assert.Equal(t, int64(3), val((*args)[4], "MultipartUpload.Parts[2].PartNumber"))
+ assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[0].ETag"))
+ assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[1].ETag"))
+ assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[2].ETag"))
+
+ // Custom headers
+ assert.Equal(t, "AES256", val((*args)[0], "ServerSideEncryption"))
+ assert.Equal(t, "content/type", val((*args)[0], "ContentType"))
+}
+
+func TestUploadOrderMultiDifferentPartSize(t *testing.T) {
+ s, ops, args := loggingSvc(emptyList)
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{
+ S3: s,
+ PartSize: 1024 * 1024 * 7,
+ Concurrency: 1,
+ })
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf12MB),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)
+
+ // Part lengths
+ assert.Equal(t, 1024*1024*7, buflen(val((*args)[1], "Body")))
+ assert.Equal(t, 1024*1024*5, buflen(val((*args)[2], "Body")))
+}
+
+func TestUploadIncreasePartSize(t *testing.T) {
+ s3manager.MaxUploadParts = 2
+ defer func() { s3manager.MaxUploadParts = 10000 }()
+
+ s, ops, args := loggingSvc(emptyList)
+ opts := &s3manager.UploadOptions{S3: s, Concurrency: 1}
+ mgr := s3manager.NewUploader(opts)
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf12MB),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, int64(0), opts.PartSize) // don't modify orig options
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)
+
+ // Part lengths
+ assert.Equal(t, 1024*1024*6, buflen(val((*args)[1], "Body")))
+ assert.Equal(t, 1024*1024*6, buflen(val((*args)[2], "Body")))
+}
+
+func TestUploadFailIfPartSizeTooSmall(t *testing.T) {
+ opts := &s3manager.UploadOptions{PartSize: 5}
+ mgr := s3manager.NewUploader(opts)
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf12MB),
+ })
+
+ assert.Nil(t, resp)
+ assert.NotNil(t, err)
+
+ aerr := err.(awserr.Error)
+ assert.Equal(t, "ConfigError", aerr.Code())
+ assert.Contains(t, aerr.Message(), "part size must be at least")
+}
+
+func TestUploadOrderSingle(t *testing.T) {
+ s, ops, args := loggingSvc(emptyList)
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s})
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf2MB),
+ ServerSideEncryption: aws.String("AES256"),
+ ContentType: aws.String("content/type"),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"PutObject"}, *ops)
+ assert.NotEqual(t, "", resp.Location)
+ assert.Equal(t, "", resp.UploadID)
+ assert.Equal(t, "AES256", val((*args)[0], "ServerSideEncryption"))
+ assert.Equal(t, "content/type", val((*args)[0], "ContentType"))
+}
+
+func TestUploadOrderSingleFailure(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ s.Handlers.Send.PushBack(func(r *aws.Request) {
+ r.HTTPResponse.StatusCode = 400
+ })
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s})
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf2MB),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, []string{"PutObject"}, *ops)
+ assert.Nil(t, resp)
+}
+
+func TestUploadOrderZero(t *testing.T) {
+ s, ops, args := loggingSvc(emptyList)
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s})
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(make([]byte, 0)),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"PutObject"}, *ops)
+ assert.NotEqual(t, "", resp.Location)
+ assert.Equal(t, "", resp.UploadID)
+ assert.Equal(t, 0, buflen(val((*args)[0], "Body")))
+}
+
+func TestUploadOrderMultiFailure(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ s.Handlers.Send.PushBack(func(r *aws.Request) {
+ switch t := r.Data.(type) {
+ case *s3.UploadPartOutput:
+ if *t.ETag == "ETAG2" {
+ r.HTTPResponse.StatusCode = 400
+ }
+ }
+ })
+
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s, Concurrency: 1})
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf12MB),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "AbortMultipartUpload"}, *ops)
+}
+
+func TestUploadOrderMultiFailureOnComplete(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ s.Handlers.Send.PushBack(func(r *aws.Request) {
+ switch r.Data.(type) {
+ case *s3.CompleteMultipartUploadOutput:
+ r.HTTPResponse.StatusCode = 400
+ }
+ })
+
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s, Concurrency: 1})
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf12MB),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart",
+ "UploadPart", "CompleteMultipartUpload", "AbortMultipartUpload"}, *ops)
+}
+
+func TestUploadOrderMultiFailureOnCreate(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ s.Handlers.Send.PushBack(func(r *aws.Request) {
+ switch r.Data.(type) {
+ case *s3.CreateMultipartUploadOutput:
+ r.HTTPResponse.StatusCode = 400
+ }
+ })
+
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s})
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(make([]byte, 1024*1024*12)),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload"}, *ops)
+}
+
+func TestUploadOrderMultiFailureLeaveParts(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ s.Handlers.Send.PushBack(func(r *aws.Request) {
+ switch data := r.Data.(type) {
+ case *s3.UploadPartOutput:
+ if *data.ETag == "ETAG2" {
+ r.HTTPResponse.StatusCode = 400
+ }
+ }
+ })
+
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{
+ S3: s,
+ Concurrency: 1,
+ LeavePartsOnError: true,
+ })
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(make([]byte, 1024*1024*12)),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart"}, *ops)
+}
+
+type failreader struct {
+ times int // 1-based Read call number on which to start failing
+ failCount int // number of Read calls seen so far
+}
+
+func (f *failreader) Read(b []byte) (int, error) {
+ f.failCount++
+ if f.failCount >= f.times {
+ return 0, fmt.Errorf("random failure") // fail on the times-th call and every call after it
+ }
+ return len(b), nil // claims a full read; b's contents are irrelevant to the tests
+}
+
+func TestUploadOrderReadFail1(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s})
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: &failreader{times: 1},
+ })
+
+ assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code())
+ assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure")
+ assert.Equal(t, []string{}, *ops)
+}
+
+func TestUploadOrderReadFail2(t *testing.T) {
+ s, ops, _ := loggingSvc([]string{"UploadPart"})
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s, Concurrency: 1})
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: &failreader{times: 2},
+ })
+
+ assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code())
+ assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure")
+ assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops)
+}
+
+type sizedReader struct {
+ size int // total number of bytes to report before EOF
+ cur int // bytes "read" so far
+}
+
+func (s *sizedReader) Read(p []byte) (n int, err error) {
+ if s.cur >= s.size {
+ return 0, io.EOF // budget exhausted
+ }
+
+ n = len(p)
+ s.cur += len(p)
+ if s.cur > s.size {
+ n -= s.cur - s.size // trim the final read so exactly size bytes are reported in total
+ }
+
+ return // p is never written; only the reported lengths matter to the tests
+}
+
+func TestUploadOrderMultiBufferedReader(t *testing.T) {
+ s, ops, args := loggingSvc(emptyList)
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s})
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: &sizedReader{size: 1024 * 1024 * 12},
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)
+
+ // Part lengths
+ parts := []int{
+ buflen(val((*args)[1], "Body")),
+ buflen(val((*args)[2], "Body")),
+ buflen(val((*args)[3], "Body")),
+ }
+ sort.Ints(parts)
+ assert.Equal(t, []int{1024 * 1024 * 2, 1024 * 1024 * 5, 1024 * 1024 * 5}, parts)
+}
+
+func TestUploadOrderMultiBufferedReaderExceedTotalParts(t *testing.T) {
+ s3manager.MaxUploadParts = 2
+ defer func() { s3manager.MaxUploadParts = 10000 }()
+ s, ops, _ := loggingSvc([]string{"UploadPart"})
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s, Concurrency: 1})
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: &sizedReader{size: 1024 * 1024 * 12},
+ })
+
+ assert.Error(t, err)
+ assert.Nil(t, resp)
+ assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops)
+
+ aerr := err.(awserr.Error)
+ assert.Equal(t, "TotalPartsExceeded", aerr.Code())
+ assert.Contains(t, aerr.Message(), "exceeded total allowed parts (2)")
+}
+
+func TestUploadOrderSingleBufferedReader(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s})
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: &sizedReader{size: 1024 * 1024 * 2},
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"PutObject"}, *ops)
+ assert.NotEqual(t, "", resp.Location)
+ assert.Equal(t, "", resp.UploadID)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100644
index 0000000..0d25fd2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -0,0 +1,57 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/internal/protocol/restxml"
+ "github.com/aws/aws-sdk-go/internal/signer/v4"
+)
+
+// S3 is a client for Amazon S3.
+type S3 struct {
+ *aws.Service
+}
+
+// Used for custom service initialization logic
+var initService func(*aws.Service)
+
+// Used for custom request initialization logic
+var initRequest func(*aws.Request)
+
+// New returns a new S3 client.
+func New(config *aws.Config) *S3 {
+ service := &aws.Service{
+ Config: aws.DefaultConfig.Merge(config), // overlay caller config onto SDK defaults (nil config is accepted; see tests)
+ ServiceName: "s3",
+ APIVersion: "2006-03-01",
+ }
+ service.Initialize()
+
+ // Handlers
+ service.Handlers.Sign.PushBack(v4.Sign) // SigV4 request signing
+ service.Handlers.Build.PushBack(restxml.Build) // S3 speaks the REST-XML protocol
+ service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ // Run custom service initialization if present
+ if initService != nil {
+ initService(service) // package-level hook, set via the initService var above
+ }
+
+ return &S3{service}
+}
+
+// newRequest creates a new request for a S3 operation and runs any
+// custom request initialization.
+func (c *S3) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := aws.NewRequest(c.Service, op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req) // package-level hook, set via the initRequest var above
+ }
+
+ return req
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse.go
new file mode 100644
index 0000000..01350f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -0,0 +1,44 @@
+package s3
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
+
+func validateSSERequiresSSL(r *aws.Request) { // reject plaintext HTTP requests that would transmit customer SSE keys
+ if r.HTTPRequest.URL.Scheme != "https" {
+ p := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") // is either SSE key field set?
+ if len(p) > 0 {
+ r.Error = errSSERequiresSSL
+ }
+ }
+}
+
+func computeSSEKeys(r *aws.Request) { // base64-encodes raw SSE-C key headers and fills in their MD5 companions
+ headers := []string{
+ "x-amz-server-side-encryption-customer-key",
+ "x-amz-copy-source-server-side-encryption-customer-key",
+ }
+
+ for _, h := range headers {
+ md5h := h + "-md5" // companion header carrying the key's digest
+ if key := r.HTTPRequest.Header.Get(h); key != "" {
+ // Base64-encode the value
+ b64v := base64.StdEncoding.EncodeToString([]byte(key))
+ r.HTTPRequest.Header.Set(h, b64v)
+
+ // Add MD5 if it wasn't computed
+ if r.HTTPRequest.Header.Get(md5h) == "" {
+ sum := md5.Sum([]byte(key)) // MD5 of the raw (pre-base64) key, per the S3 SSE-C header contract
+ b64sum := base64.StdEncoding.EncodeToString(sum[:])
+ r.HTTPRequest.Header.Set(md5h, b64sum)
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go
new file mode 100644
index 0000000..e665188
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go
@@ -0,0 +1,81 @@
+package s3_test
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/internal/test/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ = unit.Imported
+
+func TestSSECustomerKeyOverHTTPError(t *testing.T) {
+ s := s3.New(&aws.Config{DisableSSL: true})
+ req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{
+ Bucket: aws.String("bucket"),
+ CopySource: aws.String("bucket/source"),
+ Key: aws.String("dest"),
+ SSECustomerKey: aws.String("key"),
+ })
+ err := req.Build()
+
+ assert.Error(t, err)
+ assert.Equal(t, "ConfigError", err.(awserr.Error).Code())
+ assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP")
+}
+
+func TestCopySourceSSECustomerKeyOverHTTPError(t *testing.T) {
+ s := s3.New(&aws.Config{DisableSSL: true})
+ req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{
+ Bucket: aws.String("bucket"),
+ CopySource: aws.String("bucket/source"),
+ Key: aws.String("dest"),
+ CopySourceSSECustomerKey: aws.String("key"),
+ })
+ err := req.Build()
+
+ assert.Error(t, err)
+ assert.Equal(t, "ConfigError", err.(awserr.Error).Code())
+ assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP")
+}
+
+func TestComputeSSEKeys(t *testing.T) {
+ s := s3.New(nil)
+ req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{
+ Bucket: aws.String("bucket"),
+ CopySource: aws.String("bucket/source"),
+ Key: aws.String("dest"),
+ SSECustomerKey: aws.String("key"),
+ CopySourceSSECustomerKey: aws.String("key"),
+ })
+ err := req.Build()
+
+ assert.NoError(t, err)
+ assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key"))
+ assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key"))
+ assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5"))
+ assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5"))
+}
+
+func TestComputeSSEKeysShortcircuit(t *testing.T) {
+ s := s3.New(nil)
+ req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{
+ Bucket: aws.String("bucket"),
+ CopySource: aws.String("bucket/source"),
+ Key: aws.String("dest"),
+ SSECustomerKey: aws.String("key"),
+ CopySourceSSECustomerKey: aws.String("key"),
+ SSECustomerKeyMD5: aws.String("MD5"),
+ CopySourceSSECustomerKeyMD5: aws.String("MD5"),
+ })
+ err := req.Build()
+
+ assert.NoError(t, err)
+ assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key"))
+ assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key"))
+ assert.Equal(t, "MD5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5"))
+ assert.Equal(t, "MD5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5"))
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
new file mode 100644
index 0000000..c27d434
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -0,0 +1,42 @@
+package s3
+
+import (
+ "encoding/xml"
+ "io"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type xmlErrorResponse struct { // shape of the <Error> XML document S3 returns for failed requests
+ XMLName xml.Name `xml:"Error"`
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+}
+
+func unmarshalError(r *aws.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ if r.HTTPResponse.ContentLength == int64(0) {
+ // No body, use status code to generate an awserr.Error
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), // code = status text with spaces removed (e.g. "Not Found" -> "NotFound")
+ r.HTTPResponse.StatusCode,
+ "",
+ )
+ return
+ }
+
+ resp := &xmlErrorResponse{}
+ err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
+ if err != nil && err != io.EOF { // io.EOF (empty body despite non-zero ContentLength) is tolerated: resp fields stay zero
+ r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil)
+ } else {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(resp.Code, resp.Message, nil),
+ r.HTTPResponse.StatusCode,
+ "",
+ )
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go
new file mode 100644
index 0000000..ee08d62
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go
@@ -0,0 +1,53 @@
+package s3_test
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/internal/test/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ = unit.Imported
+
+var s3StatusCodeErrorTests = []struct {
+ scode int
+ status string
+ body string
+ code string
+ message string
+}{
+ {301, "Moved Permanently", "", "MovedPermanently", "Moved Permanently"},
+ {403, "Forbidden", "", "Forbidden", "Forbidden"},
+ {400, "Bad Request", "", "BadRequest", "Bad Request"},
+ {404, "Not Found", "", "NotFound", "Not Found"},
+ {500, "Internal Error", "", "InternalError", "Internal Error"},
+}
+
+func TestStatusCodeError(t *testing.T) {
+ for _, test := range s3StatusCodeErrorTests {
+ s := s3.New(nil)
+ s.Handlers.Send.Clear()
+ s.Handlers.Send.PushBack(func(r *aws.Request) {
+ body := ioutil.NopCloser(bytes.NewReader([]byte(test.body)))
+ r.HTTPResponse = &http.Response{
+ ContentLength: int64(len(test.body)),
+ StatusCode: test.scode,
+ Status: test.status,
+ Body: body,
+ }
+ })
+ _, err := s.PutBucketACL(&s3.PutBucketACLInput{
+ Bucket: aws.String("bucket"), ACL: aws.String("public-read"),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, test.code, err.(awserr.Error).Code())
+ assert.Equal(t, test.message, err.(awserr.Error).Message())
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/.gitignore b/Godeps/_workspace/src/github.com/flosch/pongo2/.gitignore
new file mode 100644
index 0000000..37eaf44
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/.gitignore
@@ -0,0 +1,40 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.idea
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.project
+EBNF.txt
+test1.tpl
+pongo2_internal_test.go
+tpl-error.out
+/count.out
+/cover.out
+*.swp
+*.iml
+/cpu.out
+/mem.out
+/pongo2.test
+*.error
+/profile
+/coverage.out
+/pongo2_internal_test.ignore
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/.travis.yml b/Godeps/_workspace/src/github.com/flosch/pongo2/.travis.yml
new file mode 100644
index 0000000..18971e1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+ - 1.4
+ - tip
+install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - go get gopkg.in/check.v1
+script:
+ - go test -v -covermode=count -coverprofile=coverage.out -bench . -cpu 1,4
+ - '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN || true'
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/AUTHORS b/Godeps/_workspace/src/github.com/flosch/pongo2/AUTHORS
new file mode 100644
index 0000000..b552df4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/AUTHORS
@@ -0,0 +1,10 @@
+Main author and maintainer of pongo2:
+
+* Florian Schlachter
+
+Contributors (in no specific order):
+
+* @romanoaugusto88
+* @vitalbh
+
+Feel free to add yourself to the list or to modify your entry if you did a contribution.
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/LICENSE b/Godeps/_workspace/src/github.com/flosch/pongo2/LICENSE
new file mode 100644
index 0000000..e876f86
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2014 Florian Schlachter
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/README.md b/Godeps/_workspace/src/github.com/flosch/pongo2/README.md
new file mode 100644
index 0000000..a5112d0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/README.md
@@ -0,0 +1,251 @@
+# [pongo](https://en.wikipedia.org/wiki/Pongo_%28genus%29)2
+
+[![GoDoc](https://godoc.org/github.com/flosch/pongo2?status.png)](https://godoc.org/github.com/flosch/pongo2)
+[![Build Status](https://travis-ci.org/flosch/pongo2.svg?branch=master)](https://travis-ci.org/flosch/pongo2)
+[![Coverage Status](https://coveralls.io/repos/flosch/pongo2/badge.png?branch=master)](https://coveralls.io/r/flosch/pongo2?branch=master)
+[![gratipay](http://img.shields.io/badge/gratipay-support%20pongo-brightgreen.svg)](https://gratipay.com/flosch/)
+[![Bountysource](https://www.bountysource.com/badge/tracker?tracker_id=3654947)](https://www.bountysource.com/trackers/3654947-pongo2?utm_source=3654947&utm_medium=shield&utm_campaign=TRACKER_BADGE)
+
+pongo2 is the successor of [pongo](https://github.com/flosch/pongo), a Django-syntax like templating-language.
+
+Install/update using `go get` (no dependencies required by pongo2):
+```
+go get -u github.com/flosch/pongo2
+```
+
+Please use the [issue tracker](https://github.com/flosch/pongo2/issues) if you're encountering any problems with pongo2 or if you need help with implementing tags or filters ([create a ticket!](https://github.com/flosch/pongo2/issues/new)). If possible, please use [playground](https://www.florian-schlachter.de/pongo2/) to create a short test case on what's wrong and include the link to the snippet in your issue.
+
+**New**: [Try pongo2 out in the pongo2 playground.](https://www.florian-schlachter.de/pongo2/)
+
+## First impression of a template
+
+```HTML+Django
+Our admins and users
+{# This is a short example to give you a quick overview of pongo2's syntax. #}
+
+{% macro user_details(user, is_admin=false) %}
+
+
+
= 40) || (user.karma > calc_avg_karma(userlist)+5) %}
+ class="karma-good"{% endif %}>
+
+
+ {{ user }}
+
+
+
+
This user registered {{ user.register_date|naturaltime }}.
+
+
+
The user's biography:
+
{{ user.biography|markdown|truncatewords_html:15 }}
+ read more
+
+ {% if is_admin %}
This user is an admin!
{% endif %}
+
+{% endmacro %}
+
+
+
+
+ Our admins
+ {% for admin in adminlist %}
+ {{ user_details(admin, true) }}
+ {% endfor %}
+
+ Our members
+ {% for user in userlist %}
+ {{ user_details(user) }}
+ {% endfor %}
+
+
+```
+
+## Development status
+
+**Latest stable release**: v3.0 (`go get -u gopkg.in/flosch/pongo2.v3` / [`v3`](https://github.com/flosch/pongo2/tree/v3)-branch) [[read the announcement](https://www.florian-schlachter.de/post/pongo2-v3/)]
+
+**Current development**: v4 (`master`-branch)
+
+*Note*: With the release of pongo v4 the branch v2 will be deprecated.
+
+**Deprecated versions** (not supported anymore): v1
+
+| Topic | Status |
+| ------------------------------------ | -------------------------------------------------------------------------------------- |
+| Django version compatibility: | [1.7](https://docs.djangoproject.com/en/1.7/ref/templates/builtins/) |
+| *Missing* (planned) **filters**: | none ([hints](https://github.com/flosch/pongo2/blob/master/filters_builtin.go#L3)) |
+| *Missing* (planned) **tags**: | none ([hints](https://github.com/flosch/pongo2/blob/master/tags.go#L3)) |
+
+Please also have a look at the [caveats](https://github.com/flosch/pongo2#caveats) and at the [official add-ons](https://github.com/flosch/pongo2#official).
+
+## Features (and new in pongo2)
+
+ * Entirely rewritten from the ground-up.
+ * [Advanced C-like expressions](https://github.com/flosch/pongo2/blob/master/template_tests/expressions.tpl).
+ * [Complex function calls within expressions](https://github.com/flosch/pongo2/blob/master/template_tests/function_calls_wrapper.tpl).
+ * [Easy API to create new filters and tags](http://godoc.org/github.com/flosch/pongo2#RegisterFilter) ([including parsing arguments](http://godoc.org/github.com/flosch/pongo2#Parser))
+ * Additional features:
+ * Macros including importing macros from other files (see [template_tests/macro.tpl](https://github.com/flosch/pongo2/blob/master/template_tests/macro.tpl))
+ * [Template sandboxing](https://godoc.org/github.com/flosch/pongo2#TemplateSet) ([directory patterns](http://golang.org/pkg/path/filepath/#Match), banned tags/filters)
+
+## Recent API changes within pongo2
+
+If you're using the `master`-branch of pongo2, you might be interested in this section. Since pongo2 is still in development (even though there is a first stable release!), there could be (backwards-incompatible) API changes over time. To keep track of these and therefore make it painless for you to adapt your codebase, I'll list them here.
+
+ * Function signature for tag execution changed: not taking a `bytes.Buffer` anymore; instead `Execute()`-functions are now taking a `TemplateWriter` interface.
+ * Function signature for tag and filter parsing/execution changed (`error` return type changed to `*Error`).
+ * `INodeEvaluator` has been removed and got replaced by `IEvaluator`. You can change your existing tags/filters by simply replacing the interface.
+ * Two new helper functions: [`RenderTemplateFile()`](https://godoc.org/github.com/flosch/pongo2#RenderTemplateFile) and [`RenderTemplateString()`](https://godoc.org/github.com/flosch/pongo2#RenderTemplateString).
+ * `Template.ExecuteRW()` is now [`Template.ExecuteWriter()`](https://godoc.org/github.com/flosch/pongo2#Template.ExecuteWriter)
+ * `Template.Execute*()` functions do now take a `pongo2.Context` directly (no pointer anymore).
+
+## How you can help
+
+ * Write [filters](https://github.com/flosch/pongo2/blob/master/filters_builtin.go#L3) / [tags](https://github.com/flosch/pongo2/blob/master/tags.go#L4) (see [tutorial](https://www.florian-schlachter.de/post/pongo2/)) by forking pongo2 and sending pull requests
+ * Write/improve code tests (use the following command to see what tests are missing: `go test -v -cover -covermode=count -coverprofile=cover.out && go tool cover -html=cover.out` or have a look at [gocover.io/github.com/flosch/pongo2](http://gocover.io/github.com/flosch/pongo2))
+ * Write/improve template tests (see the `template_tests/` directory)
+ * Write middleware, libraries and websites using pongo2. :-)
+
+# Documentation
+
+For a documentation on how the templating language works you can [head over to the Django documentation](https://docs.djangoproject.com/en/dev/topics/templates/). pongo2 aims to be compatible with it.
+
+You can access pongo2's API documentation on [godoc](https://godoc.org/github.com/flosch/pongo2).
+
+## Blog post series
+
+ * [pongo2 v3 released](https://www.florian-schlachter.de/post/pongo2-v3/)
+ * [pongo2 v2 released](https://www.florian-schlachter.de/post/pongo2-v2/)
+ * [pongo2 1.0 released](https://www.florian-schlachter.de/post/pongo2-10/) [August 8th 2014]
+ * [pongo2 playground](https://www.florian-schlachter.de/post/pongo2-playground/) [August 1st 2014]
+ * [Release of pongo2 1.0-rc1 + pongo2-addons](https://www.florian-schlachter.de/post/pongo2-10-rc1/) [July 30th 2014]
+ * [Introduction to pongo2 + migration- and "how to write tags/filters"-tutorial.](https://www.florian-schlachter.de/post/pongo2/) [June 29th 2014]
+
+## Caveats
+
+### Filters
+
+ * **date** / **time**: The `date` and `time` filter are taking the Golang specific time- and date-format (not Django's one) currently. [Take a look on the format here](http://golang.org/pkg/time/#Time.Format).
+ * **stringformat**: `stringformat` does **not** take Python's string format syntax as a parameter, instead it takes Go's. Essentially `{{ 3.14|stringformat:"pi is %.2f" }}` is `fmt.Sprintf("pi is %.2f", 3.14)`.
+ * **escape** / **force_escape**: Unlike Django's behaviour, the `escape`-filter is applied immediately. Therefore there is no need for a `force_escape`-filter yet.
+
+### Tags
+
+ * **for**: All the `forloop` fields (like `forloop.counter`) are written with a capital letter at the beginning. For example, the `counter` can be accessed by `forloop.Counter` and the parentloop by `forloop.Parentloop`.
+ * **now**: takes Go's time format (see **date** and **time**-filter).
+
+### Misc
+
+ * **not in-operator**: You can check whether a map/struct/string contains a key/field/substring by using the in-operator (or the negation of it):
+ `{% if key in map %}Key is in map{% else %}Key not in map{% endif %}` or `{% if !(key in map) %}Key is NOT in map{% else %}Key is in map{% endif %}`.
+
+# Add-ons, libraries and helpers
+
+## Official
+
+ * [ponginae](https://github.com/flosch/ponginae) - A web-framework for Go (using pongo2).
+ * [pongo2-tools](https://github.com/flosch/pongo2-tools) - Official tools and helpers for pongo2
+ * [pongo2-addons](https://github.com/flosch/pongo2-addons) - Official additional filters/tags for pongo2 (for example a **markdown**-filter). They are in their own repository because they're relying on 3rd-party-libraries.
+
+## 3rd-party
+
+ * [beego-pongo2](https://github.com/oal/beego-pongo2) - A tiny little helper for using Pongo2 with [Beego](https://github.com/astaxie/beego).
+ * [beego-pongo2.v2](https://github.com/ipfans/beego-pongo2.v2) - Same as `beego-pongo2`, but for pongo2 v2.
+ * [macaron-pongo2](https://github.com/macaron-contrib/pongo2) - pongo2 support for [Macaron](https://github.com/Unknwon/macaron), a modular web framework.
+ * [ginpongo2](https://github.com/ngerakines/ginpongo2) - middleware for [gin](https://github.com/gin-gonic/gin) to use pongo2 templates
+ * [pongo2-trans](https://github.com/fromYukki/pongo2trans) - `trans`-tag implementation for internationalization
+ * [tpongo2](https://github.com/tango-contrib/tpongo2) - pongo2 support for [Tango](https://github.com/lunny/tango), a micro-kernel & pluggable web framework.
+
+Please add your project to this list and send me a pull request when you've developed something nice for pongo2.
+
+# API-usage examples
+
+Please see the documentation for a full list of provided API methods.
+
+## A tiny example (template string)
+
+```Go
+// Compile the template first (i. e. creating the AST)
+tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
+if err != nil {
+ panic(err)
+}
+// Now you can render the template with the given
+// pongo2.Context how often you want to.
+out, err := tpl.Execute(pongo2.Context{"name": "florian"})
+if err != nil {
+ panic(err)
+}
+fmt.Println(out) // Output: Hello Florian!
+```
+
+## Example server-usage (template file)
+
+```Go
+package main
+
+import (
+ "github.com/flosch/pongo2"
+ "net/http"
+)
+
+// Pre-compiling the templates at application startup using the
+// little Must()-helper function (Must() will panic if FromFile()
+// or FromString() will return with an error - that's it).
+// It's faster to pre-compile it anywhere at startup and only
+// execute the template later.
+var tplExample = pongo2.Must(pongo2.FromFile("example.html"))
+
+func examplePage(w http.ResponseWriter, r *http.Request) {
+ // Execute the template per HTTP request
+ err := tplExample.ExecuteWriter(pongo2.Context{"query": r.FormValue("query")}, w)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+func main() {
+ http.HandleFunc("/", examplePage)
+ http.ListenAndServe(":8080", nil)
+}
+```
+
+# Benchmark
+
+The benchmarks have been run on my machine (`Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz`) using the command:
+
+ go test -bench . -cpu 1,2,4,8
+
+All benchmarks are compiling (depends on the benchmark) and executing the `template_tests/complex.tpl` template.
+
+The results are:
+
+ BenchmarkExecuteComplexWithSandboxActive 50000 60450 ns/op
+ BenchmarkExecuteComplexWithSandboxActive-2 50000 56998 ns/op
+ BenchmarkExecuteComplexWithSandboxActive-4 50000 60343 ns/op
+ BenchmarkExecuteComplexWithSandboxActive-8 50000 64229 ns/op
+ BenchmarkCompileAndExecuteComplexWithSandboxActive 10000 164410 ns/op
+ BenchmarkCompileAndExecuteComplexWithSandboxActive-2 10000 156682 ns/op
+ BenchmarkCompileAndExecuteComplexWithSandboxActive-4 10000 164821 ns/op
+ BenchmarkCompileAndExecuteComplexWithSandboxActive-8 10000 171806 ns/op
+ BenchmarkParallelExecuteComplexWithSandboxActive 50000 60428 ns/op
+ BenchmarkParallelExecuteComplexWithSandboxActive-2 50000 31887 ns/op
+ BenchmarkParallelExecuteComplexWithSandboxActive-4 100000 22810 ns/op
+ BenchmarkParallelExecuteComplexWithSandboxActive-8 100000 18820 ns/op
+ BenchmarkExecuteComplexWithoutSandbox 50000 56942 ns/op
+ BenchmarkExecuteComplexWithoutSandbox-2 50000 56168 ns/op
+ BenchmarkExecuteComplexWithoutSandbox-4 50000 57838 ns/op
+ BenchmarkExecuteComplexWithoutSandbox-8 50000 60539 ns/op
+ BenchmarkCompileAndExecuteComplexWithoutSandbox 10000 162086 ns/op
+ BenchmarkCompileAndExecuteComplexWithoutSandbox-2 10000 159771 ns/op
+ BenchmarkCompileAndExecuteComplexWithoutSandbox-4 10000 163826 ns/op
+ BenchmarkCompileAndExecuteComplexWithoutSandbox-8 10000 169062 ns/op
+ BenchmarkParallelExecuteComplexWithoutSandbox 50000 57152 ns/op
+ BenchmarkParallelExecuteComplexWithoutSandbox-2 50000 30276 ns/op
+ BenchmarkParallelExecuteComplexWithoutSandbox-4 100000 22065 ns/op
+ BenchmarkParallelExecuteComplexWithoutSandbox-8 100000 18034 ns/op
+
+Benchmarked on October 2nd 2014.
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/context.go b/Godeps/_workspace/src/github.com/flosch/pongo2/context.go
new file mode 100644
index 0000000..df587c8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/context.go
@@ -0,0 +1,122 @@
+package pongo2
+
+import (
+ "fmt"
+ "regexp"
+)
+
+var reIdentifiers = regexp.MustCompile("^[a-zA-Z0-9_]+$")
+
+// Use this Context type to provide constants, variables, instances or functions to your template.
+//
+// pongo2 automatically provides meta-information or functions through the "pongo2"-key.
+// Currently, context["pongo2"] contains the following keys:
+// 1. version: returns the version string
+//
+// Template examples for accessing items from your context:
+// {{ myconstant }}
+// {{ myfunc("test", 42) }}
+// {{ user.name }}
+// {{ pongo2.version }}
+type Context map[string]interface{}
+
+func (c Context) checkForValidIdentifiers() *Error {
+ for k, v := range c {
+ if !reIdentifiers.MatchString(k) {
+ return &Error{
+ Sender: "checkForValidIdentifiers",
+ ErrorMsg: fmt.Sprintf("Context-key '%s' (value: '%+v') is not a valid identifier.", k, v),
+ }
+ }
+ }
+ return nil
+}
+
+func (c Context) Update(other Context) Context {
+ for k, v := range other {
+ c[k] = v
+ }
+ return c
+}
+
+// If you're writing a custom tag, your tag's Execute()-function will
+// have access to the ExecutionContext. This struct stores anything
+// about the current rendering process's Context including
+// the Context provided by the user (field Public).
+// You can safely use the Private context to provide data to the user's
+// template (like a 'forloop'-information). The Shared-context is used
+// to share data between tags. All ExecutionContexts share this context.
+//
+// Please be careful when accessing the Public data.
+// PLEASE DO NOT MODIFY THE PUBLIC CONTEXT (read-only).
+//
+// To create your own execution context within tags, use the
+// NewChildExecutionContext(parent) function.
+type ExecutionContext struct {
+ template *Template
+
+ Autoescape bool
+ Public Context
+ Private Context
+ Shared Context
+}
+
+var pongo2MetaContext = Context{
+ "version": Version,
+}
+
+func newExecutionContext(tpl *Template, ctx Context) *ExecutionContext {
+ privateCtx := make(Context)
+
+ // Make the pongo2-related funcs/vars available to the context
+ privateCtx["pongo2"] = pongo2MetaContext
+
+ return &ExecutionContext{
+ template: tpl,
+
+ Public: ctx,
+ Private: privateCtx,
+ Autoescape: true,
+ }
+}
+
+func NewChildExecutionContext(parent *ExecutionContext) *ExecutionContext {
+ newctx := &ExecutionContext{
+ template: parent.template,
+
+ Public: parent.Public,
+ Private: make(Context),
+ Autoescape: parent.Autoescape,
+ }
+ newctx.Shared = parent.Shared
+
+ // Copy all existing private items
+ newctx.Private.Update(parent.Private)
+
+ return newctx
+}
+
+func (ctx *ExecutionContext) Error(msg string, token *Token) *Error {
+ filename := ctx.template.name
+ var line, col int
+ if token != nil {
+ // No tokens available
+ // TODO: Add location (from where?)
+ filename = token.Filename
+ line = token.Line
+ col = token.Col
+ }
+ return &Error{
+ Template: ctx.template,
+ Filename: filename,
+ Line: line,
+ Column: col,
+ Token: token,
+ Sender: "execution",
+ ErrorMsg: msg,
+ }
+}
+
+func (ctx *ExecutionContext) Logf(format string, args ...interface{}) {
+ ctx.template.set.logf(format, args...)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/doc.go b/Godeps/_workspace/src/github.com/flosch/pongo2/doc.go
new file mode 100644
index 0000000..5a23e2b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/doc.go
@@ -0,0 +1,31 @@
+// A Django-syntax like template-engine
+//
+// Blog posts about pongo2 (including introduction and migration):
+// https://www.florian-schlachter.de/?tag=pongo2
+//
+// Complete documentation on the template language:
+// https://docs.djangoproject.com/en/dev/topics/templates/
+//
+// Try out pongo2 live in the pongo2 playground:
+// https://www.florian-schlachter.de/pongo2/
+//
+// Make sure to read README.md in the repository as well.
+//
+// A tiny example with template strings:
+//
+// (Snippet on playground: https://www.florian-schlachter.de/pongo2/?id=1206546277)
+//
+// // Compile the template first (i. e. creating the AST)
+// tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
+// if err != nil {
+// panic(err)
+// }
+// // Now you can render the template with the given
+// // pongo2.Context how often you want to.
+// out, err := tpl.Execute(pongo2.Context{"name": "fred"})
+// if err != nil {
+// panic(err)
+// }
+// fmt.Println(out) // Output: Hello Fred!
+//
+package pongo2
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/docs/examples.md b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/examples.md
new file mode 100644
index 0000000..a98bb3a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/examples.md
@@ -0,0 +1 @@
+(Stub, TBA)
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/docs/filters.md b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/filters.md
new file mode 100644
index 0000000..40a3253
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/filters.md
@@ -0,0 +1,68 @@
+TODO:
+
+* What are filters?
+* List+explain all existing filters (pongo2 + pongo2-addons)
+
+Implemented filters so far which need documentation:
+
+* escape
+* safe
+* escapejs
+* add
+* addslashes
+* capfirst
+* center
+* cut
+* date
+* default
+* default_if_none
+* divisibleby
+* first
+* floatformat
+* get_digit
+* iriencode
+* join
+* last
+* length
+* length_is
+* linebreaks
+* linebreaksbr
+* linenumbers
+* ljust
+* lower
+* make_list
+* phone2numeric
+* pluralize
+* random
+* removetags
+* rjust
+* slice
+* stringformat
+* striptags
+* time
+* title
+* truncatechars
+* truncatechars_html
+* truncatewords
+* truncatewords_html
+* upper
+* urlencode
+* urlize
+* urlizetrunc
+* wordcount
+* wordwrap
+* yesno
+
+* filesizeformat*
+* slugify*
+* truncatesentences*
+* truncatesentences_html*
+* markdown*
+* intcomma*
+* ordinal*
+* naturalday*
+* timesince*
+* timeuntil*
+* naturaltime*
+
+Filters marked with * are available through [pongo2-addons](https://github.com/flosch/pongo2-addons).
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/docs/index.md b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/index.md
new file mode 100644
index 0000000..a98bb3a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/index.md
@@ -0,0 +1 @@
+(Stub, TBA)
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/docs/macros.md b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/macros.md
new file mode 100644
index 0000000..2b27069
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/macros.md
@@ -0,0 +1 @@
+(Stub, TBA)
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/docs/tags.md b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/tags.md
new file mode 100644
index 0000000..dae4566
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/tags.md
@@ -0,0 +1,31 @@
+TODO:
+
+* What are tags?
+* List+explain all existing tags (pongo2 + pongo2-addons)
+
+Implemented tags so far which need documentation:
+
+* autoescape
+* block
+* comment
+* cycle
+* extends
+* filter
+* firstof
+* for
+* if
+* ifchanged
+* ifequal
+* ifnotequal
+* import
+* include
+* lorem
+* macro
+* now
+* set
+* spaceless
+* ssi
+* templatetag
+* verbatim
+* widthratio
+* with
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/docs/template_sets.md b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/template_sets.md
new file mode 100644
index 0000000..a98bb3a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/template_sets.md
@@ -0,0 +1 @@
+(Stub, TBA)
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/docs/write_filters.md b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/write_filters.md
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/docs/write_tags.md b/Godeps/_workspace/src/github.com/flosch/pongo2/docs/write_tags.md
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/error.go b/Godeps/_workspace/src/github.com/flosch/pongo2/error.go
new file mode 100644
index 0000000..c1ee86e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/error.go
@@ -0,0 +1,86 @@
+package pongo2
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+)
+
+// This Error type is being used to address an error during lexing, parsing or
+// execution. If you want to return an error object (for example in your own
+// tag or filter) fill this object with as much information as you have.
+// Make sure "Sender" is always given (if you're returning an error within
+// a filter, make Sender equals 'filter:yourfilter'; same goes for tags: 'tag:mytag').
+// It's okay if you only fill in ErrorMsg if you don't have any other details at hand.
+type Error struct {
+ Template *Template
+ Filename string
+ Line int
+ Column int
+ Token *Token
+ Sender string
+ ErrorMsg string
+}
+
+func (e *Error) updateFromTokenIfNeeded(template *Template, t *Token) *Error {
+ if e.Template == nil {
+ e.Template = template
+ }
+
+ if e.Token == nil {
+ e.Token = t
+ if e.Line <= 0 {
+ e.Line = t.Line
+ e.Column = t.Col
+ }
+ }
+
+ return e
+}
+
+// Returns a nice formatted error string.
+func (e *Error) Error() string {
+ s := "[Error"
+ if e.Sender != "" {
+ s += " (where: " + e.Sender + ")"
+ }
+ if e.Filename != "" {
+ s += " in " + e.Filename
+ }
+ if e.Line > 0 {
+ s += fmt.Sprintf(" | Line %d Col %d", e.Line, e.Column)
+ if e.Token != nil {
+ s += fmt.Sprintf(" near '%s'", e.Token.Val)
+ }
+ }
+ s += "] "
+ s += e.ErrorMsg
+ return s
+}
+
+// Returns the affected line from the original template, if available.
+func (e *Error) RawLine() (line string, available bool) {
+ if e.Line <= 0 || e.Filename == "" {
+ return "", false
+ }
+
+ filename := e.Filename
+ if e.Template != nil {
+ filename = e.Template.set.resolveFilename(e.Template, e.Filename)
+ }
+ file, err := os.Open(filename)
+ if err != nil {
+ panic(err)
+ }
+ defer file.Close()
+
+ scanner := bufio.NewScanner(file)
+ l := 0
+ for scanner.Scan() {
+ l++
+ if l == e.Line {
+ return scanner.Text(), true
+ }
+ }
+ return "", false
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/filters.go b/Godeps/_workspace/src/github.com/flosch/pongo2/filters.go
new file mode 100644
index 0000000..229f7fe
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/filters.go
@@ -0,0 +1,133 @@
+package pongo2
+
+import (
+ "fmt"
+)
+
+type FilterFunction func(in *Value, param *Value) (out *Value, err *Error)
+
+var filters map[string]FilterFunction
+
+func init() {
+ filters = make(map[string]FilterFunction)
+}
+
+// Registers a new filter. If there's already a filter with the same
+// name, RegisterFilter will panic. You usually want to call this
+// function in the filter's init() function:
+// http://golang.org/doc/effective_go.html#init
+//
+// See http://www.florian-schlachter.de/post/pongo2/ for more about
+// writing filters and tags.
+func RegisterFilter(name string, fn FilterFunction) {
+ _, existing := filters[name]
+ if existing {
+ panic(fmt.Sprintf("Filter with name '%s' is already registered.", name))
+ }
+ filters[name] = fn
+}
+
+// Replaces an already registered filter with a new implementation. Use this
+// function with caution since it allows you to change existing filter behaviour.
+func ReplaceFilter(name string, fn FilterFunction) {
+ _, existing := filters[name]
+ if !existing {
+ panic(fmt.Sprintf("Filter with name '%s' does not exist (therefore cannot be overridden).", name))
+ }
+ filters[name] = fn
+}
+
+// Like ApplyFilter, but panics on an error
+func MustApplyFilter(name string, value *Value, param *Value) *Value {
+ val, err := ApplyFilter(name, value, param)
+ if err != nil {
+ panic(err)
+ }
+ return val
+}
+
+// Applies a filter to a given value using the given parameters. Returns a *pongo2.Value or an error.
+func ApplyFilter(name string, value *Value, param *Value) (*Value, *Error) {
+ fn, existing := filters[name]
+ if !existing {
+ return nil, &Error{
+ Sender: "applyfilter",
+ ErrorMsg: fmt.Sprintf("Filter with name '%s' not found.", name),
+ }
+ }
+
+ // Make sure param is a *Value
+ if param == nil {
+ param = AsValue(nil)
+ }
+
+ return fn(value, param)
+}
+
+type filterCall struct {
+ token *Token
+
+ name string
+ parameter IEvaluator
+
+ filterFunc FilterFunction
+}
+
+func (fc *filterCall) Execute(v *Value, ctx *ExecutionContext) (*Value, *Error) {
+ var param *Value
+ var err *Error
+
+ if fc.parameter != nil {
+ param, err = fc.parameter.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ param = AsValue(nil)
+ }
+
+ filtered_value, err := fc.filterFunc(v, param)
+ if err != nil {
+ return nil, err.updateFromTokenIfNeeded(ctx.template, fc.token)
+ }
+ return filtered_value, nil
+}
+
+// Filter = IDENT | IDENT ":" FilterArg | IDENT "|" Filter
+func (p *Parser) parseFilter() (*filterCall, *Error) {
+ ident_token := p.MatchType(TokenIdentifier)
+
+ // Check filter ident
+ if ident_token == nil {
+ return nil, p.Error("Filter name must be an identifier.", nil)
+ }
+
+ filter := &filterCall{
+ token: ident_token,
+ name: ident_token.Val,
+ }
+
+ // Get the appropriate filter function and bind it
+ filterFn, exists := filters[ident_token.Val]
+ if !exists {
+ return nil, p.Error(fmt.Sprintf("Filter '%s' does not exist.", ident_token.Val), ident_token)
+ }
+
+ filter.filterFunc = filterFn
+
+ // Check for filter-argument (2 tokens needed: ':' ARG)
+ if p.Match(TokenSymbol, ":") != nil {
+ if p.Peek(TokenSymbol, "}}") != nil {
+ return nil, p.Error("Filter parameter required after ':'.", nil)
+ }
+
+ // Get filter argument expression
+ v, err := p.parseVariableOrLiteral()
+ if err != nil {
+ return nil, err
+ }
+ filter.parameter = v
+ }
+
+ return filter, nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/filters_builtin.go b/Godeps/_workspace/src/github.com/flosch/pongo2/filters_builtin.go
new file mode 100644
index 0000000..aaa68b1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/filters_builtin.go
@@ -0,0 +1,903 @@
+package pongo2
+
+/* Filters that are provided through github.com/flosch/pongo2-addons:
+ ------------------------------------------------------------------
+
+ filesizeformat
+ slugify
+ timesince
+ timeuntil
+
+ Filters that won't be added:
+ ----------------------------
+
+ get_static_prefix (reason: web-framework specific)
+ pprint (reason: python-specific)
+ static (reason: web-framework specific)
+
+ Reconsideration (not implemented yet):
+ --------------------------------------
+
+ force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter)
+ safeseq (reason: same reason as `force_escape`)
+ unordered_list (python-specific; not sure whether needed or not)
+ dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name)
+ dictsortreversed (see dictsort)
+*/
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+func init() {
+ rand.Seed(time.Now().Unix())
+
+ RegisterFilter("escape", filterEscape)
+ RegisterFilter("safe", filterSafe)
+ RegisterFilter("escapejs", filterEscapejs)
+
+ RegisterFilter("add", filterAdd)
+ RegisterFilter("addslashes", filterAddslashes)
+ RegisterFilter("capfirst", filterCapfirst)
+ RegisterFilter("center", filterCenter)
+ RegisterFilter("cut", filterCut)
+ RegisterFilter("date", filterDate)
+ RegisterFilter("default", filterDefault)
+ RegisterFilter("default_if_none", filterDefaultIfNone)
+ RegisterFilter("divisibleby", filterDivisibleby)
+ RegisterFilter("first", filterFirst)
+ RegisterFilter("floatformat", filterFloatformat)
+ RegisterFilter("get_digit", filterGetdigit)
+ RegisterFilter("iriencode", filterIriencode)
+ RegisterFilter("join", filterJoin)
+ RegisterFilter("last", filterLast)
+ RegisterFilter("length", filterLength)
+ RegisterFilter("length_is", filterLengthis)
+ RegisterFilter("linebreaks", filterLinebreaks)
+ RegisterFilter("linebreaksbr", filterLinebreaksbr)
+ RegisterFilter("linenumbers", filterLinenumbers)
+ RegisterFilter("ljust", filterLjust)
+ RegisterFilter("lower", filterLower)
+ RegisterFilter("make_list", filterMakelist)
+ RegisterFilter("phone2numeric", filterPhone2numeric)
+ RegisterFilter("pluralize", filterPluralize)
+ RegisterFilter("random", filterRandom)
+ RegisterFilter("removetags", filterRemovetags)
+ RegisterFilter("rjust", filterRjust)
+ RegisterFilter("slice", filterSlice)
+ RegisterFilter("stringformat", filterStringformat)
+ RegisterFilter("striptags", filterStriptags)
+ RegisterFilter("time", filterDate) // time uses filterDate (same golang-format)
+ RegisterFilter("title", filterTitle)
+ RegisterFilter("truncatechars", filterTruncatechars)
+ RegisterFilter("truncatechars_html", filterTruncatecharsHtml)
+ RegisterFilter("truncatewords", filterTruncatewords)
+ RegisterFilter("truncatewords_html", filterTruncatewordsHtml)
+ RegisterFilter("upper", filterUpper)
+ RegisterFilter("urlencode", filterUrlencode)
+ RegisterFilter("urlize", filterUrlize)
+ RegisterFilter("urlizetrunc", filterUrlizetrunc)
+ RegisterFilter("wordcount", filterWordcount)
+ RegisterFilter("wordwrap", filterWordwrap)
+ RegisterFilter("yesno", filterYesno)
+
+ RegisterFilter("float", filterFloat) // pongo-specific
+ RegisterFilter("integer", filterInteger) // pongo-specific
+}
+
+// filterTruncatecharsHelper truncates s to at most newLen runes (not bytes).
+// When truncation happens and there is room (newLen >= 3), the final three
+// runes of the result are replaced by "...".
+func filterTruncatecharsHelper(s string, newLen int) string {
+	runes := []rune(s)
+	if newLen < len(runes) {
+		if newLen >= 3 {
+			return fmt.Sprintf("%s...", string(runes[:newLen-3]))
+		}
+		// Not enough space for the ellipsis
+		return string(runes[:newLen])
+	}
+	return string(runes)
+}
+
+// filterTruncateHtmlHelper streams `value` into new_output until cond()
+// reports that the truncation limit has been reached. HTML tags are copied
+// verbatim and their names tracked on a stack; text runes are handed to fn,
+// which does the counting and returns the next index. finalize runs once
+// (e.g. to append "..."), after which any still-open tags are closed in
+// reverse order.
+func filterTruncateHtmlHelper(value string, new_output *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, finalize func()) {
+	vLen := len(value)
+	tag_stack := make([]string, 0)
+	idx := 0
+
+	for idx < vLen && !cond() {
+		c, s := utf8.DecodeRuneInString(value[idx:])
+		if c == utf8.RuneError {
+			idx += s
+			continue
+		}
+
+		if c == '<' {
+			new_output.WriteRune(c)
+			idx += s // consume "<"
+
+			if idx+1 < vLen {
+				if value[idx] == '/' {
+					// Close tag
+
+					new_output.WriteString("/")
+
+					tag := ""
+					idx += 1 // consume "/"
+
+					for idx < vLen {
+						c2, size2 := utf8.DecodeRuneInString(value[idx:])
+						if c2 == utf8.RuneError {
+							idx += size2
+							continue
+						}
+
+						// End of tag found
+						if c2 == '>' {
+							idx++ // consume ">"
+							break
+						}
+						tag += string(c2)
+						idx += size2
+					}
+
+					if len(tag_stack) > 0 {
+						// Ideally, the close tag is TOP of tag stack
+						// In malformed HTML, it must not be, so iterate through the stack and remove the tag
+						for i := len(tag_stack) - 1; i >= 0; i-- {
+							if tag_stack[i] == tag {
+								// Found the tag
+								tag_stack[i] = tag_stack[len(tag_stack)-1]
+								tag_stack = tag_stack[:len(tag_stack)-1]
+								break
+							}
+						}
+					}
+
+					new_output.WriteString(tag)
+					new_output.WriteString(">")
+				} else {
+					// Open tag
+
+					tag := ""
+
+					params := false
+					for idx < vLen {
+						c2, size2 := utf8.DecodeRuneInString(value[idx:])
+						if c2 == utf8.RuneError {
+							idx += size2
+							continue
+						}
+
+						new_output.WriteRune(c2)
+
+						// End of tag found
+						if c2 == '>' {
+							idx++ // consume ">"
+							break
+						}
+
+						// Tag name ends at the first space; everything after
+						// that is attributes and not part of the stack entry.
+						if !params {
+							if c2 == ' ' {
+								params = true
+							} else {
+								tag += string(c2)
+							}
+						}
+
+						idx += size2
+					}
+
+					// Add tag to stack
+					tag_stack = append(tag_stack, tag)
+				}
+			}
+		} else {
+			idx = fn(c, s, idx)
+		}
+	}
+
+	finalize()
+
+	for i := len(tag_stack) - 1; i >= 0; i-- {
+		tag := tag_stack[i]
+		// Close everything from the regular tag stack; must emit a proper
+		// closing tag ("</tag>"), not just "tag>".
+		new_output.WriteString(fmt.Sprintf("</%s>", tag))
+	}
+}
+
+func filterTruncatechars(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+ newLen := param.Integer()
+ return AsValue(filterTruncatecharsHelper(s, newLen)), nil
+}
+
+func filterTruncatecharsHtml(in *Value, param *Value) (*Value, *Error) {
+ value := in.String()
+ newLen := max(param.Integer()-3, 0)
+
+ new_output := bytes.NewBuffer(nil)
+
+ textcounter := 0
+
+ filterTruncateHtmlHelper(value, new_output, func() bool {
+ return textcounter >= newLen
+ }, func(c rune, s int, idx int) int {
+ textcounter++
+ new_output.WriteRune(c)
+
+ return idx + s
+ }, func() {
+ if textcounter >= newLen && textcounter < len(value) {
+ new_output.WriteString("...")
+ }
+ })
+
+ return AsSafeValue(new_output.String()), nil
+}
+
+func filterTruncatewords(in *Value, param *Value) (*Value, *Error) {
+ words := strings.Fields(in.String())
+ n := param.Integer()
+ if n <= 0 {
+ return AsValue(""), nil
+ }
+ nlen := min(len(words), n)
+ out := make([]string, 0, nlen)
+ for i := 0; i < nlen; i++ {
+ out = append(out, words[i])
+ }
+
+ if n < len(words) {
+ out = append(out, "...")
+ }
+
+ return AsValue(strings.Join(out, " ")), nil
+}
+
+func filterTruncatewordsHtml(in *Value, param *Value) (*Value, *Error) {
+ value := in.String()
+ newLen := max(param.Integer(), 0)
+
+ new_output := bytes.NewBuffer(nil)
+
+ wordcounter := 0
+
+ filterTruncateHtmlHelper(value, new_output, func() bool {
+ return wordcounter >= newLen
+ }, func(_ rune, _ int, idx int) int {
+ // Get next word
+ word_found := false
+
+ for idx < len(value) {
+ c2, size2 := utf8.DecodeRuneInString(value[idx:])
+ if c2 == utf8.RuneError {
+ idx += size2
+ continue
+ }
+
+ if c2 == '<' {
+ // HTML tag start, don't consume it
+ return idx
+ }
+
+ new_output.WriteRune(c2)
+ idx += size2
+
+ if c2 == ' ' || c2 == '.' || c2 == ',' || c2 == ';' {
+ // Word ends here, stop capturing it now
+ break
+ } else {
+ word_found = true
+ }
+ }
+
+ if word_found {
+ wordcounter++
+ }
+
+ return idx
+ }, func() {
+ if wordcounter >= newLen {
+ new_output.WriteString("...")
+ }
+ })
+
+ return AsSafeValue(new_output.String()), nil
+}
+
+func filterEscape(in *Value, param *Value) (*Value, *Error) {
+ output := strings.Replace(in.String(), "&", "&", -1)
+ output = strings.Replace(output, ">", ">", -1)
+ output = strings.Replace(output, "<", "<", -1)
+ output = strings.Replace(output, "\"", """, -1)
+ output = strings.Replace(output, "'", "'", -1)
+ return AsValue(output), nil
+}
+
+func filterSafe(in *Value, param *Value) (*Value, *Error) {
+ return in, nil // nothing to do here, just to keep track of the safe application
+}
+
+// filterEscapejs escapes the input for safe embedding inside a JavaScript
+// string literal: every rune other than ASCII letters, space and '/' is
+// emitted as a \u00XX escape. The two-character textual sequences `\r` and
+// `\n` are first normalized to the \uXXXX form of the control character.
+func filterEscapejs(in *Value, param *Value) (*Value, *Error) {
+	sin := in.String()
+
+	var b bytes.Buffer
+
+	idx := 0
+	for idx < len(sin) {
+		c, size := utf8.DecodeRuneInString(sin[idx:])
+		if c == utf8.RuneError {
+			idx += size
+			continue
+		}
+
+		if c == '\\' {
+			// Escape seq?
+			if idx+1 < len(sin) {
+				switch sin[idx+1] {
+				case 'r':
+					b.WriteString(fmt.Sprintf(`\u%04X`, '\r'))
+					idx += 2
+					continue
+				case 'n':
+					b.WriteString(fmt.Sprintf(`\u%04X`, '\n'))
+					idx += 2
+					continue
+				/*case '\'':
+				b.WriteString(fmt.Sprintf(`\u%04X`, '\''))
+				idx += 2
+				continue
+				case '"':
+				b.WriteString(fmt.Sprintf(`\u%04X`, '"'))
+				idx += 2
+				continue*/
+				}
+			}
+		}
+
+		// Letters, space and '/' pass through; everything else is escaped.
+		if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' {
+			b.WriteRune(c)
+		} else {
+			b.WriteString(fmt.Sprintf(`\u%04X`, c))
+		}
+
+		idx += size
+	}
+
+	return AsValue(b.String()), nil
+}
+
+func filterAdd(in *Value, param *Value) (*Value, *Error) {
+ if in.IsNumber() && param.IsNumber() {
+ if in.IsFloat() || param.IsFloat() {
+ return AsValue(in.Float() + param.Float()), nil
+ } else {
+ return AsValue(in.Integer() + param.Integer()), nil
+ }
+ }
+ // If in/param is not a number, we're relying on the
+ // Value's String() convertion and just add them both together
+ return AsValue(in.String() + param.String()), nil
+}
+
+func filterAddslashes(in *Value, param *Value) (*Value, *Error) {
+ output := strings.Replace(in.String(), "\\", "\\\\", -1)
+ output = strings.Replace(output, "\"", "\\\"", -1)
+ output = strings.Replace(output, "'", "\\'", -1)
+ return AsValue(output), nil
+}
+
+func filterCut(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil
+}
+
+func filterLength(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(in.Len()), nil
+}
+
+func filterLengthis(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(in.Len() == param.Integer()), nil
+}
+
+func filterDefault(in *Value, param *Value) (*Value, *Error) {
+ if !in.IsTrue() {
+ return param, nil
+ }
+ return in, nil
+}
+
+func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) {
+ if in.IsNil() {
+ return param, nil
+ }
+ return in, nil
+}
+
+func filterDivisibleby(in *Value, param *Value) (*Value, *Error) {
+ if param.Integer() == 0 {
+ return AsValue(false), nil
+ }
+ return AsValue(in.Integer()%param.Integer() == 0), nil
+}
+
+func filterFirst(in *Value, param *Value) (*Value, *Error) {
+ if in.CanSlice() && in.Len() > 0 {
+ return in.Index(0), nil
+ }
+ return AsValue(""), nil
+}
+
+// filterFloatformat formats the input as a float with a fixed number of
+// decimal places. The parameter selects the decimal count; a negative or
+// missing/non-numeric parameter additionally "trims" whole numbers down to
+// their integer representation (mirrors Django's floatformat).
+func filterFloatformat(in *Value, param *Value) (*Value, *Error) {
+	val := in.Float()
+
+	decimals := -1
+	if !param.IsNil() {
+		// Any argument provided?
+		decimals = param.Integer()
+	}
+
+	// if the argument is not a number (e. g. empty), the default
+	// behaviour is trim the result
+	trim := !param.IsNumber()
+
+	if decimals <= 0 {
+		// argument is negative or zero, so we
+		// want the output being trimmed
+		decimals = -decimals
+		trim = true
+	}
+
+	if trim {
+		// Remove zeroes
+		if float64(int(val)) == val {
+			return AsValue(in.Integer()), nil
+		}
+	}
+
+	return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil
+}
+
+// filterGetdigit returns the param-th digit of the input, counted from the
+// right (1 = rightmost digit). Non-positive or out-of-range positions return
+// the input unchanged.
+func filterGetdigit(in *Value, param *Value) (*Value, *Error) {
+	i := param.Integer()
+	l := len(in.String()) // do NOT use in.Len() here!
+	if i <= 0 || i > l {
+		return in, nil
+	}
+	return AsValue(in.String()[l-i] - 48), nil // 48 == '0': ASCII digit -> numeric value
+}
+
+const filterIRIChars = "/#%[]=:;$&()+,!?*@'~"
+
+func filterIriencode(in *Value, param *Value) (*Value, *Error) {
+ var b bytes.Buffer
+
+ sin := in.String()
+ for _, r := range sin {
+ if strings.IndexRune(filterIRIChars, r) >= 0 {
+ b.WriteRune(r)
+ } else {
+ b.WriteString(url.QueryEscape(string(r)))
+ }
+ }
+
+ return AsValue(b.String()), nil
+}
+
+func filterJoin(in *Value, param *Value) (*Value, *Error) {
+ if !in.CanSlice() {
+ return in, nil
+ }
+ sep := param.String()
+ sl := make([]string, 0, in.Len())
+ for i := 0; i < in.Len(); i++ {
+ sl = append(sl, in.Index(i).String())
+ }
+ return AsValue(strings.Join(sl, sep)), nil
+}
+
+func filterLast(in *Value, param *Value) (*Value, *Error) {
+ if in.CanSlice() && in.Len() > 0 {
+ return in.Index(in.Len() - 1), nil
+ }
+ return AsValue(""), nil
+}
+
+func filterUpper(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(strings.ToUpper(in.String())), nil
+}
+
+func filterLower(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(strings.ToLower(in.String())), nil
+}
+
+func filterMakelist(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+ result := make([]string, 0, len(s))
+ for _, c := range s {
+ result = append(result, string(c))
+ }
+ return AsValue(result), nil
+}
+
+func filterCapfirst(in *Value, param *Value) (*Value, *Error) {
+ if in.Len() <= 0 {
+ return AsValue(""), nil
+ }
+ t := in.String()
+ r, size := utf8.DecodeRuneInString(t)
+ return AsValue(strings.ToUpper(string(r)) + t[size:]), nil
+}
+
+func filterCenter(in *Value, param *Value) (*Value, *Error) {
+ width := param.Integer()
+ slen := in.Len()
+ if width <= slen {
+ return in, nil
+ }
+
+ spaces := width - slen
+ left := spaces/2 + spaces%2
+ right := spaces / 2
+
+ return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left),
+ in.String(), strings.Repeat(" ", right))), nil
+}
+
+func filterDate(in *Value, param *Value) (*Value, *Error) {
+ t, is_time := in.Interface().(time.Time)
+ if !is_time {
+ return nil, &Error{
+ Sender: "filter:date",
+ ErrorMsg: "Filter input argument must be of type 'time.Time'.",
+ }
+ }
+ return AsValue(t.Format(param.String())), nil
+}
+
+func filterFloat(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(in.Float()), nil
+}
+
+func filterInteger(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(in.Integer()), nil
+}
+
+// filterLinebreaks converts plain-text newlines into HTML: single newlines
+// become <br />, blank lines terminate the current <p>…</p> paragraph and
+// start a new one (mirrors Django's linebreaks filter).
+func filterLinebreaks(in *Value, param *Value) (*Value, *Error) {
+	if in.Len() == 0 {
+		return in, nil
+	}
+
+	var b bytes.Buffer
+
+	// Newline = <br />
+	// Double newline = <p>...</p>
+	lines := strings.Split(in.String(), "\n")
+	lenlines := len(lines)
+
+	opened := false
+
+	for idx, line := range lines {
+
+		if !opened {
+			b.WriteString("<p>")
+			opened = true
+		}
+
+		b.WriteString(line)
+
+		if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" {
+			// We've not reached the end
+			if strings.TrimSpace(lines[idx+1]) == "" {
+				// Next line is empty, close the paragraph
+				if opened {
+					b.WriteString("</p>")
+					opened = false
+				}
+			} else {
+				b.WriteString("<br />")
+			}
+		}
+	}
+
+	if opened {
+		b.WriteString("</p>")
+	}
+
+	return AsValue(b.String()), nil
+}
+
+func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(strings.Replace(in.String(), "\n", "
", -1)), nil
+}
+
+func filterLinenumbers(in *Value, param *Value) (*Value, *Error) {
+ lines := strings.Split(in.String(), "\n")
+ output := make([]string, 0, len(lines))
+ for idx, line := range lines {
+ output = append(output, fmt.Sprintf("%d. %s", idx+1, line))
+ }
+ return AsValue(strings.Join(output, "\n")), nil
+}
+
+func filterLjust(in *Value, param *Value) (*Value, *Error) {
+ times := param.Integer() - in.Len()
+ if times < 0 {
+ times = 0
+ }
+ return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil
+}
+
+func filterUrlencode(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(url.QueryEscape(in.String())), nil
+}
+
+// TODO: This regexp could do some work
+var filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`)
+var filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`)
+
+// filterUrlizeHelper wraps URLs and e-mail addresses found in input with
+// HTML anchor tags. The link text is truncated to `trunc` characters
+// (with "...") when trunc > 3; `autoescape` HTML-escapes the link text of
+// URL matches.
+func filterUrlizeHelper(input string, autoescape bool, trunc int) string {
+	sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string {
+		var prefix string
+		var suffix string
+		// Preserve the surrounding spaces the regexp may have captured.
+		if strings.HasPrefix(raw_url, " ") {
+			prefix = " "
+		}
+		if strings.HasSuffix(raw_url, " ") {
+			suffix = " "
+		}
+
+		raw_url = strings.TrimSpace(raw_url)
+
+		t, err := ApplyFilter("iriencode", AsValue(raw_url), nil)
+		if err != nil {
+			panic(err)
+		}
+		url := t.String()
+
+		if !strings.HasPrefix(url, "http") {
+			url = fmt.Sprintf("http://%s", url)
+		}
+
+		title := raw_url
+
+		if trunc > 3 && len(title) > trunc {
+			title = fmt.Sprintf("%s...", title[:trunc-3])
+		}
+
+		if autoescape {
+			t, err := ApplyFilter("escape", AsValue(title), nil)
+			if err != nil {
+				panic(err)
+			}
+			title = t.String()
+		}
+
+		return fmt.Sprintf(`%s<a href="%s" rel="nofollow">%s</a>%s`, prefix, url, title, suffix)
+	})
+
+	sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail string) string {
+
+		title := mail
+
+		if trunc > 3 && len(title) > trunc {
+			title = fmt.Sprintf("%s...", title[:trunc-3])
+		}
+
+		return fmt.Sprintf(`<a href="mailto:%s">%s</a>`, mail, title)
+	})
+
+	return sout
+}
+
+func filterUrlize(in *Value, param *Value) (*Value, *Error) {
+ autoescape := true
+ if param.IsBool() {
+ autoescape = param.Bool()
+ }
+
+ return AsValue(filterUrlizeHelper(in.String(), autoescape, -1)), nil
+}
+
+func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(filterUrlizeHelper(in.String(), true, param.Integer())), nil
+}
+
+func filterStringformat(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil
+}
+
+var re_striptags = regexp.MustCompile("<[^>]*?>")
+
+func filterStriptags(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+
+ // Strip all tags
+ s = re_striptags.ReplaceAllString(s, "")
+
+ return AsValue(strings.TrimSpace(s)), nil
+}
+
+// https://en.wikipedia.org/wiki/Phoneword
+var filterPhone2numericMap = map[string]string{
+ "a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5",
+ "l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8",
+ "w": "9", "x": "9", "y": "9", "z": "9",
+}
+
+func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) {
+ sin := in.String()
+ for k, v := range filterPhone2numericMap {
+ sin = strings.Replace(sin, k, v, -1)
+ sin = strings.Replace(sin, strings.ToUpper(k), v, -1)
+ }
+ return AsValue(sin), nil
+}
+
+func filterPluralize(in *Value, param *Value) (*Value, *Error) {
+ if in.IsNumber() {
+ // Works only on numbers
+ if param.Len() > 0 {
+ endings := strings.Split(param.String(), ",")
+ if len(endings) > 2 {
+ return nil, &Error{
+ Sender: "filter:pluralize",
+ ErrorMsg: "You cannot pass more than 2 arguments to filter 'pluralize'.",
+ }
+ }
+ if len(endings) == 1 {
+ // 1 argument
+ if in.Integer() != 1 {
+ return AsValue(endings[0]), nil
+ }
+ } else {
+ if in.Integer() != 1 {
+ // 2 arguments
+ return AsValue(endings[1]), nil
+ }
+ return AsValue(endings[0]), nil
+ }
+ } else {
+ if in.Integer() != 1 {
+ // return default 's'
+ return AsValue("s"), nil
+ }
+ }
+
+ return AsValue(""), nil
+ } else {
+ return nil, &Error{
+ Sender: "filter:pluralize",
+ ErrorMsg: "Filter 'pluralize' does only work on numbers.",
+ }
+ }
+}
+
+func filterRandom(in *Value, param *Value) (*Value, *Error) {
+ if !in.CanSlice() || in.Len() <= 0 {
+ return in, nil
+ }
+ i := rand.Intn(in.Len())
+ return in.Index(i), nil
+}
+
+// filterRemovetags strips the HTML tags named in the comma-separated
+// parameter from the input (open and close forms), leaving all other tags
+// intact (mirrors Django's removetags filter).
+func filterRemovetags(in *Value, param *Value) (*Value, *Error) {
+	s := in.String()
+	tags := strings.Split(param.String(), ",")
+
+	// Strip only specific tags: matches "<tag>", "</tag>" and "<tag/>".
+	for _, tag := range tags {
+		re := regexp.MustCompile(fmt.Sprintf("</?%s/?>", tag))
+		s = re.ReplaceAllString(s, "")
+	}
+
+	return AsValue(strings.TrimSpace(s)), nil
+}
+
+func filterRjust(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil
+}
+
+// filterSlice slices the input with a Python-style "from:to" parameter.
+// `from` is clamped to the input length; `to` is only honoured when it lies
+// within [from, len]. Non-sliceable inputs are returned unchanged.
+func filterSlice(in *Value, param *Value) (*Value, *Error) {
+	comp := strings.Split(param.String(), ":")
+	if len(comp) != 2 {
+		return nil, &Error{
+			Sender:   "filter:slice",
+			ErrorMsg: "Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]",
+		}
+	}
+
+	if !in.CanSlice() {
+		return in, nil
+	}
+
+	from := AsValue(comp[0]).Integer()
+	to := in.Len()
+
+	if from > to {
+		from = to
+	}
+
+	vto := AsValue(comp[1]).Integer()
+	if vto >= from && vto <= in.Len() {
+		to = vto
+	}
+
+	return in.Slice(from, to), nil
+}
+
+func filterTitle(in *Value, param *Value) (*Value, *Error) {
+ if !in.IsString() {
+ return AsValue(""), nil
+ }
+ return AsValue(strings.Title(strings.ToLower(in.String()))), nil
+}
+
+func filterWordcount(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(len(strings.Fields(in.String()))), nil
+}
+
+// filterWordwrap wraps the input into lines of at most `param` words each,
+// joined with newlines. A non-positive parameter returns the input
+// unchanged.
+func filterWordwrap(in *Value, param *Value) (*Value, *Error) {
+	words := strings.Fields(in.String())
+	words_len := len(words)
+	wrap_at := param.Integer()
+	if wrap_at <= 0 {
+		return in, nil
+	}
+
+	// Ceiling division. The previous formula
+	// (words_len/wrap_at + words_len%wrap_at) over-counted lines and made
+	// words[wrap_at*i:...] panic with low > high (e.g. 8 words, wrap_at 3).
+	linecount := (words_len + wrap_at - 1) / wrap_at
+	lines := make([]string, 0, linecount)
+	for i := 0; i < linecount; i++ {
+		lines = append(lines, strings.Join(words[wrap_at*i:min(wrap_at*(i+1), words_len)], " "))
+	}
+	return AsValue(strings.Join(lines, "\n")), nil
+}
+
+// filterYesno maps the input's truthiness to one of three strings:
+// true -> choices[0] ("yes"), false -> choices[1] ("no"),
+// nil -> choices[2] ("maybe"). A comma-separated parameter of 2 or 3 values
+// overrides the defaults (mirrors Django's yesno filter).
+func filterYesno(in *Value, param *Value) (*Value, *Error) {
+	choices := map[int]string{
+		0: "yes",
+		1: "no",
+		2: "maybe",
+	}
+	param_string := param.String()
+	custom_choices := strings.Split(param_string, ",")
+	if len(param_string) > 0 {
+		if len(custom_choices) > 3 {
+			return nil, &Error{
+				Sender:   "filter:yesno",
+				ErrorMsg: fmt.Sprintf("You cannot pass more than 3 options to the 'yesno'-filter (got: '%s').", param_string),
+			}
+		}
+		if len(custom_choices) < 2 {
+			return nil, &Error{
+				Sender:   "filter:yesno",
+				ErrorMsg: fmt.Sprintf("You must pass either no or at least 2 arguments to the 'yesno'-filter (got: '%s').", param_string),
+			}
+		}
+
+		// Map to the options now
+		choices[0] = custom_choices[0]
+		choices[1] = custom_choices[1]
+		if len(custom_choices) == 3 {
+			choices[2] = custom_choices[2]
+		}
+	}
+
+	// maybe
+	if in.IsNil() {
+		return AsValue(choices[2]), nil
+	}
+
+	// yes
+	if in.IsTrue() {
+		return AsValue(choices[0]), nil
+	}
+
+	// no
+	return AsValue(choices[1]), nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/helpers.go b/Godeps/_workspace/src/github.com/flosch/pongo2/helpers.go
new file mode 100644
index 0000000..880dbc0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/helpers.go
@@ -0,0 +1,15 @@
+package pongo2
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/lexer.go b/Godeps/_workspace/src/github.com/flosch/pongo2/lexer.go
new file mode 100644
index 0000000..8956f9c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/lexer.go
@@ -0,0 +1,421 @@
+package pongo2
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ TokenError = iota
+ EOF
+
+ TokenHTML
+
+ TokenKeyword
+ TokenIdentifier
+ TokenString
+ TokenNumber
+ TokenSymbol
+)
+
+var (
+ tokenSpaceChars = " \n\r\t"
+ tokenIdentifierChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
+ tokenIdentifierCharsWithDigits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789"
+ tokenDigits = "0123456789"
+
+ // Available symbols in pongo2 (within filters/tag)
+ TokenSymbols = []string{
+ // 3-Char symbols
+
+ // 2-Char symbols
+ "==", ">=", "<=", "&&", "||", "{{", "}}", "{%", "%}", "!=", "<>",
+
+ // 1-Char symbol
+ "(", ")", "+", "-", "*", "<", ">", "/", "^", ",", ".", "!", "|", ":", "=", "%",
+ }
+
+ // Available keywords in pongo2
+ TokenKeywords = []string{"in", "and", "or", "not", "true", "false", "as", "export"}
+)
+
+type TokenType int
+type Token struct {
+ Filename string
+ Typ TokenType
+ Val string
+ Line int
+ Col int
+}
+
+type lexerStateFn func() lexerStateFn
+type lexer struct {
+ name string
+ input string
+ start int // start pos of the item
+ pos int // current pos
+ width int // width of last rune
+ tokens []*Token
+ errored bool
+ startline int
+ startcol int
+ line int
+ col int
+
+ in_verbatim bool
+ verbatim_name string
+}
+
+// String returns a human-readable debug representation of the token,
+// including its type, raw value (truncated when very long) and position.
+func (t *Token) String() string {
+	val := t.Val
+	if len(val) > 1000 {
+		val = fmt.Sprintf("%s...%s", val[:10], val[len(val)-5:len(val)])
+	}
+
+	typ := ""
+	switch t.Typ {
+	case TokenHTML:
+		typ = "HTML"
+	case TokenError:
+		typ = "Error"
+	case TokenIdentifier:
+		typ = "Identifier"
+	case TokenKeyword:
+		typ = "Keyword"
+	case TokenNumber:
+		typ = "Number"
+	case TokenString:
+		typ = "String"
+	case TokenSymbol:
+		typ = "Symbol"
+	default:
+		typ = "Unknown"
+	}
+
+	return fmt.Sprintf("<Token typ=%s (%d) val='%s' line=%d col=%d>",
+		typ, t.Typ, val, t.Line, t.Col)
+}
+
+func lex(name string, input string) ([]*Token, *Error) {
+ l := &lexer{
+ name: name,
+ input: input,
+ tokens: make([]*Token, 0, 100),
+ line: 1,
+ col: 1,
+ startline: 1,
+ startcol: 1,
+ }
+ l.run()
+ if l.errored {
+ errtoken := l.tokens[len(l.tokens)-1]
+ return nil, &Error{
+ Filename: name,
+ Line: errtoken.Line,
+ Column: errtoken.Col,
+ Sender: "lexer",
+ ErrorMsg: errtoken.Val,
+ }
+ }
+ return l.tokens, nil
+}
+
+func (l *lexer) value() string {
+ return l.input[l.start:l.pos]
+}
+
+func (l *lexer) length() int {
+ return l.pos - l.start
+}
+
+func (l *lexer) emit(t TokenType) {
+ tok := &Token{
+ Filename: l.name,
+ Typ: t,
+ Val: l.value(),
+ Line: l.startline,
+ Col: l.startcol,
+ }
+
+ if t == TokenString {
+ // Escape sequence \" in strings
+ tok.Val = strings.Replace(tok.Val, `\"`, `"`, -1)
+ tok.Val = strings.Replace(tok.Val, `\\`, `\`, -1)
+ }
+
+ l.tokens = append(l.tokens, tok)
+ l.start = l.pos
+ l.startline = l.line
+ l.startcol = l.col
+}
+
+func (l *lexer) next() rune {
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return EOF
+ }
+ r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.width = w
+ l.pos += l.width
+ l.col += l.width
+ return r
+}
+
+func (l *lexer) backup() {
+ l.pos -= l.width
+ l.col -= l.width
+}
+
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+func (l *lexer) ignore() {
+ l.start = l.pos
+ l.startline = l.line
+ l.startcol = l.col
+}
+
+func (l *lexer) accept(what string) bool {
+ if strings.IndexRune(what, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+func (l *lexer) acceptRun(what string) {
+ for strings.IndexRune(what, l.next()) >= 0 {
+ }
+ l.backup()
+}
+
+func (l *lexer) errorf(format string, args ...interface{}) lexerStateFn {
+ t := &Token{
+ Filename: l.name,
+ Typ: TokenError,
+ Val: fmt.Sprintf(format, args...),
+ Line: l.startline,
+ Col: l.startcol,
+ }
+ l.tokens = append(l.tokens, t)
+ l.errored = true
+ l.startline = l.line
+ l.startcol = l.col
+ return nil
+}
+
+func (l *lexer) eof() bool {
+ return l.start >= len(l.input)-1
+}
+
+func (l *lexer) run() {
+ for {
+ // TODO: Support verbatim tag names
+ // https://docs.djangoproject.com/en/dev/ref/templates/builtins/#verbatim
+ if l.in_verbatim {
+ name := l.verbatim_name
+ if name != "" {
+ name += " "
+ }
+ if strings.HasPrefix(l.input[l.pos:], fmt.Sprintf("{%% endverbatim %s%%}", name)) { // end verbatim
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+ w := len("{% endverbatim %}")
+ l.pos += w
+ l.col += w
+ l.ignore()
+ l.in_verbatim = false
+ }
+ } else if strings.HasPrefix(l.input[l.pos:], "{% verbatim %}") { // tag
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+ l.in_verbatim = true
+ w := len("{% verbatim %}")
+ l.pos += w
+ l.col += w
+ l.ignore()
+ }
+
+ if !l.in_verbatim {
+ // Ignore single-line comments {# ... #}
+ if strings.HasPrefix(l.input[l.pos:], "{#") {
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+
+ l.pos += 2 // pass '{#'
+ l.col += 2
+
+ for {
+ switch l.peek() {
+ case EOF:
+ l.errorf("Single-line comment not closed.")
+ return
+ case '\n':
+ l.errorf("Newline not permitted in a single-line comment.")
+ return
+ }
+
+ if strings.HasPrefix(l.input[l.pos:], "#}") {
+ l.pos += 2 // pass '#}'
+ l.col += 2
+ break
+ }
+
+ l.next()
+ }
+ l.ignore() // ignore whole comment
+
+ // Comment skipped
+ continue // next token
+ }
+
+ if strings.HasPrefix(l.input[l.pos:], "{{") || // variable
+ strings.HasPrefix(l.input[l.pos:], "{%") { // tag
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+ l.tokenize()
+ if l.errored {
+ return
+ }
+ continue
+ }
+ }
+
+ switch l.peek() {
+ case '\n':
+ l.line++
+ l.col = 0
+ }
+ if l.next() == EOF {
+ break
+ }
+ }
+
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+
+ if l.in_verbatim {
+ l.errorf("verbatim-tag not closed, got EOF.")
+ }
+}
+
+func (l *lexer) tokenize() {
+ for state := l.stateCode; state != nil; {
+ state = state()
+ }
+}
+
+func (l *lexer) stateCode() lexerStateFn {
+outer_loop:
+ for {
+ switch {
+ case l.accept(tokenSpaceChars):
+ if l.value() == "\n" {
+ return l.errorf("Newline not allowed within tag/variable.")
+ }
+ l.ignore()
+ continue
+ case l.accept(tokenIdentifierChars):
+ return l.stateIdentifier
+ case l.accept(tokenDigits):
+ return l.stateNumber
+ case l.accept(`"`):
+ return l.stateString
+ }
+
+ // Check for symbol
+ for _, sym := range TokenSymbols {
+ if strings.HasPrefix(l.input[l.start:], sym) {
+ l.pos += len(sym)
+ l.col += l.length()
+ l.emit(TokenSymbol)
+
+ if sym == "%}" || sym == "}}" {
+ // Tag/variable end, return after emit
+ return nil
+ }
+
+ continue outer_loop
+ }
+ }
+
+ if l.pos < len(l.input) {
+ return l.errorf("Unknown character: %q (%d)", l.peek(), l.peek())
+ }
+
+ break
+ }
+
+ // Normal shut down
+ return nil
+}
+
+func (l *lexer) stateIdentifier() lexerStateFn {
+ l.acceptRun(tokenIdentifierChars)
+ l.acceptRun(tokenIdentifierCharsWithDigits)
+ for _, kw := range TokenKeywords {
+ if kw == l.value() {
+ l.emit(TokenKeyword)
+ return l.stateCode
+ }
+ }
+ l.emit(TokenIdentifier)
+ return l.stateCode
+}
+
+func (l *lexer) stateNumber() lexerStateFn {
+ l.acceptRun(tokenDigits)
+ /*
+ Maybe context-sensitive number lexing?
+ * comments.0.Text // first comment
+ * usercomments.1.0 // second user, first comment
+ * if (score >= 8.5) // 8.5 as a number
+
+ if l.peek() == '.' {
+ l.accept(".")
+ if !l.accept(tokenDigits) {
+ return l.errorf("Malformed number.")
+ }
+ l.acceptRun(tokenDigits)
+ }
+ */
+ l.emit(TokenNumber)
+ return l.stateCode
+}
+
+// stateString lexes a double-quoted string literal. The opening quote has
+// already been accepted; it is dropped from the emitted token, as is the
+// closing quote. Only \" and \\ escape sequences are permitted; EOF or a
+// raw newline inside the string is an error.
+func (l *lexer) stateString() lexerStateFn {
+	l.ignore()
+	l.startcol -= 1 // we're starting the position at the first "
+	for !l.accept(`"`) {
+		switch l.next() {
+		case '\\':
+			// escape sequence
+			switch l.peek() {
+			case '"', '\\':
+				l.next()
+			default:
+				return l.errorf("Unknown escape sequence: \\%c", l.peek())
+			}
+		case EOF:
+			return l.errorf("Unexpected EOF, string not closed.")
+		case '\n':
+			return l.errorf("Newline in string is not allowed.")
+		}
+	}
+	// Step back before the closing quote so it is excluded from the token.
+	l.backup()
+	l.emit(TokenString)
+
+	// Consume and discard the closing quote.
+	l.next()
+	l.ignore()
+
+	return l.stateCode
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/nodes.go b/Godeps/_workspace/src/github.com/flosch/pongo2/nodes.go
new file mode 100644
index 0000000..5b039cd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/nodes.go
@@ -0,0 +1,16 @@
+package pongo2
+
+// The root document
+type nodeDocument struct {
+ Nodes []INode
+}
+
+func (doc *nodeDocument) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for _, n := range doc.Nodes {
+ err := n.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/nodes_html.go b/Godeps/_workspace/src/github.com/flosch/pongo2/nodes_html.go
new file mode 100644
index 0000000..9680285
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/nodes_html.go
@@ -0,0 +1,10 @@
+package pongo2
+
+type nodeHTML struct {
+ token *Token
+}
+
+func (n *nodeHTML) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ writer.WriteString(n.token.Val)
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/nodes_wrapper.go b/Godeps/_workspace/src/github.com/flosch/pongo2/nodes_wrapper.go
new file mode 100644
index 0000000..d1bcb8d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/nodes_wrapper.go
@@ -0,0 +1,16 @@
+package pongo2
+
// NodeWrapper wraps all nodes found between a tag and its matching end tag
// (see Parser.WrapUntilTag) and executes them as a unit.
type NodeWrapper struct {
	// Endtag is the name of the end tag that terminated the wrapping.
	Endtag string
	nodes []INode
}
+
+func (wrapper *NodeWrapper) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for _, n := range wrapper.nodes {
+ err := n.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/parser.go b/Godeps/_workspace/src/github.com/flosch/pongo2/parser.go
new file mode 100644
index 0000000..ea876e5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/parser.go
@@ -0,0 +1,266 @@
+package pongo2
+
+import (
+ "fmt"
+ "strings"
+)
+
// INode is implemented by every node of the parsed template tree; Execute
// renders the node into the writer using the given execution context.
type INode interface {
	Execute(*ExecutionContext, TemplateWriter) *Error
}
+
// IEvaluator is an INode that additionally yields a value when evaluated
// (expressions, literals, variables). GetPositionToken reports the token
// used for error positioning; FilterApplied reports whether the named
// filter was applied throughout the evaluator tree.
type IEvaluator interface {
	INode
	GetPositionToken() *Token
	Evaluate(*ExecutionContext) (*Value, *Error)
	FilterApplied(name string) bool
}
+
// The parser provides you a comprehensive and easy tool to
// work with the template document and arguments provided by
// the user for your custom tag.
//
// The parser works on a token list which will be provided by pongo2.
// A token is a unit you can work with. Tokens are either of type identifier,
// string, number, keyword, HTML or symbol.
//
// (See Token's documentation for more about tokens)
type Parser struct {
	name string // name of the parsed template/file, used in error messages
	idx int // index of the current (next unconsumed) token
	tokens []*Token
	last_token *Token // last token of the input; used to position EOF errors

	// if the parser parses a template document, here will be
	// a reference to it (needed to access the template through Tags)
	template *Template
}
+
+// Creates a new parser to parse tokens.
+// Used inside pongo2 to parse documents and to provide an easy-to-use
+// parser for tag authors
+func newParser(name string, tokens []*Token, template *Template) *Parser {
+ p := &Parser{
+ name: name,
+ tokens: tokens,
+ template: template,
+ }
+ if len(tokens) > 0 {
+ p.last_token = tokens[len(tokens)-1]
+ }
+ return p
+}
+
+// Consume one token. It will be gone forever.
+func (p *Parser) Consume() {
+ p.ConsumeN(1)
+}
+
+// Consume N tokens. They will be gone forever.
+func (p *Parser) ConsumeN(count int) {
+ p.idx += count
+}
+
+// Returns the current token.
+func (p *Parser) Current() *Token {
+ return p.Get(p.idx)
+}
+
+// Returns the CURRENT token if the given type matches.
+// Consumes this token on success.
+func (p *Parser) MatchType(typ TokenType) *Token {
+ if t := p.PeekType(typ); t != nil {
+ p.Consume()
+ return t
+ }
+ return nil
+}
+
+// Returns the CURRENT token if the given type AND value matches.
+// Consumes this token on success.
+func (p *Parser) Match(typ TokenType, val string) *Token {
+ if t := p.Peek(typ, val); t != nil {
+ p.Consume()
+ return t
+ }
+ return nil
+}
+
+// Returns the CURRENT token if the given type AND *one* of
+// the given values matches.
+// Consumes this token on success.
+func (p *Parser) MatchOne(typ TokenType, vals ...string) *Token {
+ for _, val := range vals {
+ if t := p.Peek(typ, val); t != nil {
+ p.Consume()
+ return t
+ }
+ }
+ return nil
+}
+
// PeekType returns the CURRENT token if the given type matches.
// It DOES NOT consume the token.
func (p *Parser) PeekType(typ TokenType) *Token {
	return p.PeekTypeN(0, typ)
}
+
// Peek returns the CURRENT token if the given type AND value matches.
// It DOES NOT consume the token.
func (p *Parser) Peek(typ TokenType, val string) *Token {
	return p.PeekN(0, typ, val)
}
+
+// Returns the CURRENT token if the given type AND *one* of
+// the given values matches.
+// It DOES NOT consume the token.
+func (p *Parser) PeekOne(typ TokenType, vals ...string) *Token {
+ for _, v := range vals {
+ t := p.PeekN(0, typ, v)
+ if t != nil {
+ return t
+ }
+ }
+ return nil
+}
+
+// Returns the tokens[current position + shift] token if the
+// given type AND value matches for that token.
+// DOES NOT consume the token.
+func (p *Parser) PeekN(shift int, typ TokenType, val string) *Token {
+ t := p.Get(p.idx + shift)
+ if t != nil {
+ if t.Typ == typ && t.Val == val {
+ return t
+ }
+ }
+ return nil
+}
+
+// Returns the tokens[current position + shift] token if the given type matches.
+// DOES NOT consume the token for that token.
+func (p *Parser) PeekTypeN(shift int, typ TokenType) *Token {
+ t := p.Get(p.idx + shift)
+ if t != nil {
+ if t.Typ == typ {
+ return t
+ }
+ }
+ return nil
+}
+
// Remaining returns the UNCONSUMED token count (0 at EOF).
func (p *Parser) Remaining() int {
	return len(p.tokens) - p.idx
}
+
// Count returns the total token count, consumed or not.
func (p *Parser) Count() int {
	return len(p.tokens)
}
+
+// Returns tokens[i] or NIL (if i >= len(tokens))
+func (p *Parser) Get(i int) *Token {
+ if i < len(p.tokens) {
+ return p.tokens[i]
+ }
+ return nil
+}
+
+// Returns tokens[current-position + shift] or NIL
+// (if (current-position + i) >= len(tokens))
+func (p *Parser) GetR(shift int) *Token {
+ i := p.idx + shift
+ return p.Get(i)
+}
+
+// Produces a nice error message and returns an error-object.
+// The 'token'-argument is optional. If provided, it will take
+// the token's position information. If not provided, it will
+// automatically use the CURRENT token's position information.
+func (p *Parser) Error(msg string, token *Token) *Error {
+ if token == nil {
+ // Set current token
+ token = p.Current()
+ if token == nil {
+ // Set to last token
+ if len(p.tokens) > 0 {
+ token = p.tokens[len(p.tokens)-1]
+ }
+ }
+ }
+ var line, col int
+ if token != nil {
+ line = token.Line
+ col = token.Col
+ }
+ return &Error{
+ Template: p.template,
+ Filename: p.name,
+ Sender: "parser",
+ Line: line,
+ Column: col,
+ Token: token,
+ ErrorMsg: msg,
+ }
+}
+
// Wraps all nodes between starting tag and "{% endtag %}" and provides
// one simple interface to execute the wrapped nodes.
// It returns a parser to process provided arguments to the tag.
//
// names lists the accepted end-tag identifiers (e.g. "endif", "else");
// wrapping stops at the first one found. On success, Endtag is set to the
// matched name and the returned Parser iterates over the tokens that
// appeared between the end tag's name and its closing "%}".
func (p *Parser) WrapUntilTag(names ...string) (*NodeWrapper, *Parser, *Error) {
	wrapper := &NodeWrapper{}

	tagArgs := make([]*Token, 0)

	for p.Remaining() > 0 {
		// New tag, check whether we have to stop wrapping here
		if p.Peek(TokenSymbol, "{%") != nil {
			tag_ident := p.PeekTypeN(1, TokenIdentifier)

			if tag_ident != nil {
				// We've found a (!) end-tag

				found := false
				for _, n := range names {
					if tag_ident.Val == n {
						found = true
						break
					}
				}

				// We only process the tag if we've found an end tag
				if found {
					// Okay, endtag found.
					p.ConsumeN(2) // '{%' tagname

					// Collect everything up to "%}" as arguments for the
					// end tag and hand them back in a fresh sub-parser.
					for {
						if p.Match(TokenSymbol, "%}") != nil {
							// Okay, end the wrapping here
							wrapper.Endtag = tag_ident.Val
							return wrapper, newParser(p.template.name, tagArgs, p.template), nil
						} else {
							t := p.Current()
							p.Consume()
							if t == nil {
								return nil, nil, p.Error("Unexpected EOF.", p.last_token)
							}
							tagArgs = append(tagArgs, t)
						}
					}
				}
			}

		}

		// Otherwise process next element to be wrapped
		node, err := p.parseDocElement()
		if err != nil {
			return nil, nil, err
		}
		wrapper.nodes = append(wrapper.nodes, node)
	}

	return nil, nil, p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")),
		p.last_token)
}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/parser_document.go b/Godeps/_workspace/src/github.com/flosch/pongo2/parser_document.go
new file mode 100644
index 0000000..4ab8b93
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/parser_document.go
@@ -0,0 +1,54 @@
+package pongo2
+
+// Doc = { ( Filter | Tag | HTML ) }
+func (p *Parser) parseDocElement() (INode, *Error) {
+ t := p.Current()
+
+ switch t.Typ {
+ case TokenHTML:
+ p.Consume() // consume HTML element
+ return &nodeHTML{token: t}, nil
+ case TokenSymbol:
+ switch t.Val {
+ case "{{":
+ // parse variable
+ variable, err := p.parseVariableElement()
+ if err != nil {
+ return nil, err
+ }
+ return variable, nil
+ case "{%":
+ // parse tag
+ tag, err := p.parseTagElement()
+ if err != nil {
+ return nil, err
+ }
+ return tag, nil
+ }
+ }
+ return nil, p.Error("Unexpected token (only HTML/tags/filters in templates allowed)", t)
+}
+
+func (tpl *Template) parse() *Error {
+ tpl.parser = newParser(tpl.name, tpl.tokens, tpl)
+ doc, err := tpl.parser.parseDocument()
+ if err != nil {
+ return err
+ }
+ tpl.root = doc
+ return nil
+}
+
+func (p *Parser) parseDocument() (*nodeDocument, *Error) {
+ doc := &nodeDocument{}
+
+ for p.Remaining() > 0 {
+ node, err := p.parseDocElement()
+ if err != nil {
+ return nil, err
+ }
+ doc.Nodes = append(doc.Nodes, node)
+ }
+
+ return doc, nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/parser_expression.go b/Godeps/_workspace/src/github.com/flosch/pongo2/parser_expression.go
new file mode 100644
index 0000000..79a5942
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/parser_expression.go
@@ -0,0 +1,498 @@
+package pongo2
+
+import (
+ "fmt"
+ "math"
+)
+
// Expression is the top-level expression node combining two operands with
// a logical operator ("and"/"&&", "or"/"||"); expr2 is nil for a plain
// pass-through expression.
type Expression struct {
	// TODO: Add location token?
	expr1 IEvaluator
	expr2 IEvaluator
	op_token *Token
}
+
// relationalExpression compares two operands (==, !=, <, >, <=, >=, <>,
// in); expr2 is nil when no comparison operator is present.
type relationalExpression struct {
	// TODO: Add location token?
	expr1 IEvaluator
	expr2 IEvaluator
	op_token *Token
}
+
// simpleExpression is an additive expression (term1 [+/- term2]) with
// optional logical negation ("!"/"not") and an optional leading "-" sign.
type simpleExpression struct {
	negate bool
	negative_sign bool
	term1 IEvaluator
	term2 IEvaluator
	op_token *Token
}
+
// term is a multiplicative expression (factor1 [*,/,% factor2]); factor2
// is nil when no operator follows.
type term struct {
	// TODO: Add location token?
	factor1 IEvaluator
	factor2 IEvaluator
	op_token *Token
}
+
// power is an exponentiation node (power1 ^ power2); power2 is nil when
// no "^" operator follows.
type power struct {
	// TODO: Add location token?
	power1 IEvaluator
	power2 IEvaluator
}
+
+func (expr *Expression) FilterApplied(name string) bool {
+ return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
+ (expr.expr2 != nil && expr.expr2.FilterApplied(name)))
+}
+
+func (expr *relationalExpression) FilterApplied(name string) bool {
+ return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
+ (expr.expr2 != nil && expr.expr2.FilterApplied(name)))
+}
+
+func (expr *simpleExpression) FilterApplied(name string) bool {
+ return expr.term1.FilterApplied(name) && (expr.term2 == nil ||
+ (expr.term2 != nil && expr.term2.FilterApplied(name)))
+}
+
+func (t *term) FilterApplied(name string) bool {
+ return t.factor1.FilterApplied(name) && (t.factor2 == nil ||
+ (t.factor2 != nil && t.factor2.FilterApplied(name)))
+}
+
+func (p *power) FilterApplied(name string) bool {
+ return p.power1.FilterApplied(name) && (p.power2 == nil ||
+ (p.power2 != nil && p.power2.FilterApplied(name)))
+}
+
// GetPositionToken returns the position token of the leftmost operand,
// used for error reporting.
func (expr *Expression) GetPositionToken() *Token {
	return expr.expr1.GetPositionToken()
}
+
// GetPositionToken returns the position token of the leftmost operand,
// used for error reporting.
func (expr *relationalExpression) GetPositionToken() *Token {
	return expr.expr1.GetPositionToken()
}
+
// GetPositionToken returns the position token of the first term, used for
// error reporting.
func (expr *simpleExpression) GetPositionToken() *Token {
	return expr.term1.GetPositionToken()
}
+
// GetPositionToken returns the position token of the first factor, used
// for error reporting.
func (expr *term) GetPositionToken() *Token {
	return expr.factor1.GetPositionToken()
}
+
// GetPositionToken returns the position token of the base operand, used
// for error reporting.
func (expr *power) GetPositionToken() *Token {
	return expr.power1.GetPositionToken()
}
+
+func (expr *Expression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *relationalExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *simpleExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *term) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *power) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *Expression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ v1, err := expr.expr1.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if expr.expr2 != nil {
+ v2, err := expr.expr2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ switch expr.op_token.Val {
+ case "and", "&&":
+ return AsValue(v1.IsTrue() && v2.IsTrue()), nil
+ case "or", "||":
+ return AsValue(v1.IsTrue() || v2.IsTrue()), nil
+ default:
+ panic(fmt.Sprintf("unimplemented: %s", expr.op_token.Val))
+ }
+ } else {
+ return v1, nil
+ }
+}
+
// Evaluate computes the comparison's boolean value. Ordering operators
// compare as floats when either side is a float, otherwise as integers;
// "=="/"!="/"<>" use EqualValueTo and "in" checks containment of the left
// value within the right. Without an operator, the left value passes
// through unchanged.
func (expr *relationalExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
	v1, err := expr.expr1.Evaluate(ctx)
	if err != nil {
		return nil, err
	}
	if expr.expr2 != nil {
		v2, err := expr.expr2.Evaluate(ctx)
		if err != nil {
			return nil, err
		}
		switch expr.op_token.Val {
		case "<=":
			if v1.IsFloat() || v2.IsFloat() {
				return AsValue(v1.Float() <= v2.Float()), nil
			} else {
				return AsValue(v1.Integer() <= v2.Integer()), nil
			}
		case ">=":
			if v1.IsFloat() || v2.IsFloat() {
				return AsValue(v1.Float() >= v2.Float()), nil
			} else {
				return AsValue(v1.Integer() >= v2.Integer()), nil
			}
		case "==":
			return AsValue(v1.EqualValueTo(v2)), nil
		case ">":
			if v1.IsFloat() || v2.IsFloat() {
				return AsValue(v1.Float() > v2.Float()), nil
			} else {
				return AsValue(v1.Integer() > v2.Integer()), nil
			}
		case "<":
			if v1.IsFloat() || v2.IsFloat() {
				return AsValue(v1.Float() < v2.Float()), nil
			} else {
				return AsValue(v1.Integer() < v2.Integer()), nil
			}
		case "!=", "<>":
			return AsValue(!v1.EqualValueTo(v2)), nil
		case "in":
			return AsValue(v2.Contains(v1)), nil
		default:
			panic(fmt.Sprintf("unimplemented: %s", expr.op_token.Val))
		}
	} else {
		return v1, nil
	}
}
+
// Evaluate computes the expression's value: the first term, with optional
// logical negation and/or sign flip applied, optionally combined with a
// second term via "+" or "-". A negative sign on a non-number is an
// execution error. Addition/subtraction promote to float when either side
// is a float.
func (expr *simpleExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
	t1, err := expr.term1.Evaluate(ctx)
	if err != nil {
		return nil, err
	}
	result := t1

	if expr.negate {
		result = result.Negate()
	}

	if expr.negative_sign {
		if result.IsNumber() {
			switch {
			case result.IsFloat():
				result = AsValue(-1 * result.Float())
			case result.IsInteger():
				result = AsValue(-1 * result.Integer())
			default:
				panic("not possible")
			}
		} else {
			return nil, ctx.Error("Negative sign on a non-number expression", expr.GetPositionToken())
		}
	}

	if expr.term2 != nil {
		t2, err := expr.term2.Evaluate(ctx)
		if err != nil {
			return nil, err
		}
		switch expr.op_token.Val {
		case "+":
			if result.IsFloat() || t2.IsFloat() {
				// Result will be a float
				return AsValue(result.Float() + t2.Float()), nil
			} else {
				// Result will be an integer
				return AsValue(result.Integer() + t2.Integer()), nil
			}
		case "-":
			if result.IsFloat() || t2.IsFloat() {
				// Result will be a float
				return AsValue(result.Float() - t2.Float()), nil
			} else {
				// Result will be an integer
				return AsValue(result.Integer() - t2.Integer()), nil
			}
		default:
			panic("unimplemented")
		}
	}

	return result, nil
}
+
// Evaluate computes the term's value. "*" and "/" promote to float when
// either factor is a float; "%" always operates on integers. Without a
// second factor, the first factor's value passes through unchanged.
func (t *term) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
	f1, err := t.factor1.Evaluate(ctx)
	if err != nil {
		return nil, err
	}
	if t.factor2 != nil {
		f2, err := t.factor2.Evaluate(ctx)
		if err != nil {
			return nil, err
		}
		switch t.op_token.Val {
		case "*":
			if f1.IsFloat() || f2.IsFloat() {
				// Result will be float
				return AsValue(f1.Float() * f2.Float()), nil
			}
			// Result will be int
			return AsValue(f1.Integer() * f2.Integer()), nil
		case "/":
			if f1.IsFloat() || f2.IsFloat() {
				// Result will be float
				return AsValue(f1.Float() / f2.Float()), nil
			}
			// Result will be int
			// NOTE(review): an integer divisor of 0 causes a runtime
			// division-by-zero panic here — consider returning a ctx.Error.
			return AsValue(f1.Integer() / f2.Integer()), nil
		case "%":
			// Result will be int
			// NOTE(review): same division-by-zero concern as "/" above.
			return AsValue(f1.Integer() % f2.Integer()), nil
		default:
			panic("unimplemented")
		}
	} else {
		return f1, nil
	}
}
+
+func (pw *power) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ p1, err := pw.power1.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if pw.power2 != nil {
+ p2, err := pw.power2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return AsValue(math.Pow(p1.Float(), p2.Float())), nil
+ } else {
+ return p1, nil
+ }
+}
+
+func (p *Parser) parseFactor() (IEvaluator, *Error) {
+ if p.Match(TokenSymbol, "(") != nil {
+ expr, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ if p.Match(TokenSymbol, ")") == nil {
+ return nil, p.Error("Closing bracket expected after expression", nil)
+ }
+ return expr, nil
+ }
+
+ return p.parseVariableOrLiteralWithFilter()
+}
+
+func (p *Parser) parsePower() (IEvaluator, *Error) {
+ pw := new(power)
+
+ power1, err := p.parseFactor()
+ if err != nil {
+ return nil, err
+ }
+ pw.power1 = power1
+
+ if p.Match(TokenSymbol, "^") != nil {
+ power2, err := p.parsePower()
+ if err != nil {
+ return nil, err
+ }
+ pw.power2 = power2
+ }
+
+ if pw.power2 == nil {
+ // Shortcut for faster evaluation
+ return pw.power1, nil
+ }
+
+ return pw, nil
+}
+
// parseTerm parses Term = Power { ("*" | "/" | "%") Power }. Repeated
// operators chain left-associatively by nesting the term built so far as
// factor1 of a fresh term node.
func (p *Parser) parseTerm() (IEvaluator, *Error) {
	return_term := new(term)

	factor1, err := p.parsePower()
	if err != nil {
		return nil, err
	}
	return_term.factor1 = factor1

	for p.PeekOne(TokenSymbol, "*", "/", "%") != nil {
		if return_term.op_token != nil {
			// Create new sub-term
			return_term = &term{
				factor1: return_term,
			}
		}

		op := p.Current()
		p.Consume()

		factor2, err := p.parsePower()
		if err != nil {
			return nil, err
		}

		return_term.op_token = op
		return_term.factor2 = factor2
	}

	if return_term.op_token == nil {
		// Shortcut for faster evaluation
		return return_term.factor1, nil
	}

	return return_term, nil
}
+
+func (p *Parser) parseSimpleExpression() (IEvaluator, *Error) {
+ expr := new(simpleExpression)
+
+ if sign := p.MatchOne(TokenSymbol, "+", "-"); sign != nil {
+ if sign.Val == "-" {
+ expr.negative_sign = true
+ }
+ }
+
+ if p.Match(TokenSymbol, "!") != nil || p.Match(TokenKeyword, "not") != nil {
+ expr.negate = true
+ }
+
+ term1, err := p.parseTerm()
+ if err != nil {
+ return nil, err
+ }
+ expr.term1 = term1
+
+ for p.PeekOne(TokenSymbol, "+", "-") != nil {
+ if expr.op_token != nil {
+ // New sub expr
+ expr = &simpleExpression{
+ term1: expr,
+ }
+ }
+
+ op := p.Current()
+ p.Consume()
+
+ term2, err := p.parseTerm()
+ if err != nil {
+ return nil, err
+ }
+
+ expr.term2 = term2
+ expr.op_token = op
+ }
+
+ if expr.negate == false && expr.negative_sign == false && expr.term2 == nil {
+ // Shortcut for faster evaluation
+ return expr.term1, nil
+ }
+
+ return expr, nil
+}
+
+func (p *Parser) parseRelationalExpression() (IEvaluator, *Error) {
+ expr1, err := p.parseSimpleExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ expr := &relationalExpression{
+ expr1: expr1,
+ }
+
+ if t := p.MatchOne(TokenSymbol, "==", "<=", ">=", "!=", "<>", ">", "<"); t != nil {
+ expr2, err := p.parseRelationalExpression()
+ if err != nil {
+ return nil, err
+ }
+ expr.op_token = t
+ expr.expr2 = expr2
+ } else if t := p.MatchOne(TokenKeyword, "in"); t != nil {
+ expr2, err := p.parseSimpleExpression()
+ if err != nil {
+ return nil, err
+ }
+ expr.op_token = t
+ expr.expr2 = expr2
+ }
+
+ if expr.expr2 == nil {
+ // Shortcut for faster evaluation
+ return expr.expr1, nil
+ }
+
+ return expr, nil
+}
+
+func (p *Parser) ParseExpression() (IEvaluator, *Error) {
+ rexpr1, err := p.parseRelationalExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ exp := &Expression{
+ expr1: rexpr1,
+ }
+
+ if p.PeekOne(TokenSymbol, "&&", "||") != nil || p.PeekOne(TokenKeyword, "and", "or") != nil {
+ op := p.Current()
+ p.Consume()
+ expr2, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ exp.expr2 = expr2
+ exp.op_token = op
+ }
+
+ if exp.expr2 == nil {
+ // Shortcut for faster evaluation
+ return exp.expr1, nil
+ }
+
+ return exp, nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2.go b/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2.go
new file mode 100644
index 0000000..8b98b56
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2.go
@@ -0,0 +1,14 @@
+package pongo2
+
+// Version string
+const Version = "dev"
+
+// Helper function which panics, if a Template couldn't
+// successfully parsed. This is how you would use it:
+// var baseTemplate = pongo2.Must(pongo2.FromFile("templates/base.html"))
+func Must(tpl *Template, err error) *Template {
+ if err != nil {
+ panic(err)
+ }
+ return tpl
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2_issues_test.go b/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2_issues_test.go
new file mode 100644
index 0000000..731a290
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2_issues_test.go
@@ -0,0 +1,20 @@
+package pongo2
+
+import (
+ "testing"
+
+ . "gopkg.in/check.v1"
+)
+
// Hook up gocheck into the "go test" runner.

// TestIssues hands control to gocheck's runner, which executes all
// registered suites.
func TestIssues(t *testing.T) { TestingT(t) }
+
+type IssueTestSuite struct{}
+
+var _ = Suite(&IssueTestSuite{})
+
+func (s *TestSuite) TestIssues(c *C) {
+ // Add a test for any issue
+ c.Check(42, Equals, 42)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2_template_test.go b/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2_template_test.go
new file mode 100644
index 0000000..0096986
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2_template_test.go
@@ -0,0 +1,540 @@
+package pongo2
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+)
+
+var admin_list = []string{"user2"}
+
+var time1 = time.Date(2014, 06, 10, 15, 30, 15, 0, time.UTC)
+var time2 = time.Date(2011, 03, 21, 8, 37, 56, 12, time.UTC)
+
// post is a test fixture representing a blog post with a body and a
// creation timestamp.
type post struct {
	Text string
	Created time.Time
}
+
// user is a test fixture with a name and a validation flag.
type user struct {
	Name string
	Validated bool
}
+
// comment is a test fixture tying an author, a date and a text body
// together.
type comment struct {
	Author *user
	Date time.Time
	Text string
}
+
+func is_admin(u *user) bool {
+ for _, a := range admin_list {
+ if a == u.Name {
+ return true
+ }
+ }
+ return false
+}
+
// Is_admin exposes the admin check to templates as a pongo2 *Value.
func (u *user) Is_admin() *Value {
	return AsValue(is_admin(u))
}
+
// Is_admin2 exposes the admin check to templates as a plain bool.
func (u *user) Is_admin2() bool {
	return is_admin(u)
}
+
// String implements fmt.Stringer so templates can print a post directly.
func (p *post) String() string {
	return ":-)"
}
+
+/*
+ * Start setup sandbox
+ */
+
// tagSandboxDemoTag is a dummy tag node used by the sandbox ban/unban
// tests; it just prints "hello".
type tagSandboxDemoTag struct {
}
+
// Execute writes the fixed string "hello"; it never fails.
func (node *tagSandboxDemoTag) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	writer.WriteString("hello")
	return nil
}
+
// tagSandboxDemoTagParser is the tag-parser used to register the demo tag
// under both the banned and unbanned names; it takes no arguments.
func tagSandboxDemoTagParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	return &tagSandboxDemoTag{}, nil
}
+
// BannedFilterFn is a pass-through filter registered under both the
// banned and unbanned filter names for the sandbox tests.
func BannedFilterFn(in *Value, params *Value) (*Value, *Error) {
	return in, nil
}
+
// init prepares the default template set for the test suite: it registers
// banned/unbanned demo filters and tags, whitelists the template_tests/
// tree (up to three levels deep) and /tmp/pongo2_* in the sandbox, and
// creates a temp file exposed to templates as the "temp_file" global.
func init() {
	DefaultSet.Debug = true

	RegisterFilter("banned_filter", BannedFilterFn)
	RegisterFilter("unbanned_filter", BannedFilterFn)
	RegisterTag("banned_tag", tagSandboxDemoTagParser)
	RegisterTag("unbanned_tag", tagSandboxDemoTagParser)

	DefaultSet.BanFilter("banned_filter")
	DefaultSet.BanTag("banned_tag")

	// Allow different kind of levels inside template_tests/
	abs_path, err := filepath.Abs("./template_tests/*")
	if err != nil {
		panic(err)
	}
	DefaultSet.SandboxDirectories = append(DefaultSet.SandboxDirectories, abs_path)

	abs_path, err = filepath.Abs("./template_tests/*/*")
	if err != nil {
		panic(err)
	}
	DefaultSet.SandboxDirectories = append(DefaultSet.SandboxDirectories, abs_path)

	abs_path, err = filepath.Abs("./template_tests/*/*/*")
	if err != nil {
		panic(err)
	}
	DefaultSet.SandboxDirectories = append(DefaultSet.SandboxDirectories, abs_path)

	// Allow pongo2 temp files
	DefaultSet.SandboxDirectories = append(DefaultSet.SandboxDirectories, "/tmp/pongo2_*")

	f, err := ioutil.TempFile("/tmp/", "pongo2_")
	if err != nil {
		panic("cannot write to /tmp/")
	}
	// NOTE(review): the Write error is ignored and the file is never
	// closed — acceptable for test setup, but worth tightening.
	f.Write([]byte("Hello from pongo2"))
	DefaultSet.Globals["temp_file"] = f.Name()
}
+
+/*
+ * End setup sandbox
+ */
+
+var tplContext = Context{
+ "number": 11,
+ "simple": map[string]interface{}{
+ "number": 42,
+ "name": "john doe",
+ "included_file": "INCLUDES.helper",
+ "included_file_not_exists": "INCLUDES.helper.not_exists",
+ "nil": nil,
+ "uint": uint(8),
+ "float": float64(3.1415),
+ "str": "string",
+ "chinese_hello_world": "ä½ å¥½ä¸–ç•Œ",
+ "bool_true": true,
+ "bool_false": false,
+ "newline_text": `this is a text
+with a new line in it`,
+ "long_text": `This is a simple text.
+
+This too, as a paragraph.
+Right?
+
+Yep!`,
+ "escape_js_test": `escape sequences \r\n\'\" special chars "?!=$<>`,
+ "one_item_list": []int{99},
+ "multiple_item_list": []int{1, 1, 2, 3, 5, 8, 13, 21, 34, 55},
+ "misc_list": []interface{}{"Hello", 99, 3.14, "good"},
+ "escape_text": "This is \\a Test. \"Yep\". 'Yep'.",
+ "xss": "",
+ "intmap": map[int]string{
+ 1: "one",
+ 2: "two",
+ 5: "five",
+ },
+ "func_add": func(a, b int) int {
+ return a + b
+ },
+ "func_add_iface": func(a, b interface{}) interface{} {
+ return a.(int) + b.(int)
+ },
+ "func_variadic": func(msg string, args ...interface{}) string {
+ return fmt.Sprintf(msg, args...)
+ },
+ "func_variadic_sum_int": func(args ...int) int {
+ // Create a sum
+ s := 0
+ for _, i := range args {
+ s += i
+ }
+ return s
+ },
+ "func_variadic_sum_int2": func(args ...*Value) *Value {
+ // Create a sum
+ s := 0
+ for _, i := range args {
+ s += i.Integer()
+ }
+ return AsValue(s)
+ },
+ },
+ "complex": map[string]interface{}{
+ "is_admin": is_admin,
+ "post": post{
+			Text: "Hello!\nWelcome to my new blog page. I'm using pongo2 which supports {{ variables }} and {% tags %}.\n",
+ Created: time2,
+ },
+ "comments": []*comment{
+ &comment{
+ Author: &user{
+ Name: "user1",
+ Validated: true,
+ },
+ Date: time1,
+ Text: "\"pongo2 is nice!\"",
+ },
+ &comment{
+ Author: &user{
+ Name: "user2",
+ Validated: true,
+ },
+ Date: time2,
+ Text: "comment2 with tags in it",
+ },
+ &comment{
+ Author: &user{
+ Name: "user3",
+ Validated: false,
+ },
+ Date: time1,
+ Text: "hello! there",
+ },
+ },
+ "comments2": []*comment{
+ &comment{
+ Author: &user{
+ Name: "user1",
+ Validated: true,
+ },
+ Date: time2,
+ Text: "\"pongo2 is nice!\"",
+ },
+ &comment{
+ Author: &user{
+ Name: "user1",
+ Validated: true,
+ },
+ Date: time1,
+ Text: "comment2 with tags in it",
+ },
+ &comment{
+ Author: &user{
+ Name: "user3",
+ Validated: false,
+ },
+ Date: time1,
+ Text: "hello! there",
+ },
+ },
+ },
+}
+
// TestTemplates renders every template_tests/*.tpl against tplContext and
// compares the output byte-for-byte with the matching .tpl.out file. On a
// mismatch the rendered output is dumped to a .error file so it can be
// diffed against the expected output.
func TestTemplates(t *testing.T) {
	debug = true

	// Add a global to the default set
	Globals["this_is_a_global_variable"] = "this is a global text"

	matches, err := filepath.Glob("./template_tests/*.tpl")
	if err != nil {
		t.Fatal(err)
	}
	for idx, match := range matches {
		t.Logf("[Template %3d] Testing '%s'", idx+1, match)
		tpl, err := FromFile(match)
		if err != nil {
			t.Fatalf("Error on FromFile('%s'): %s", match, err.Error())
		}
		test_filename := fmt.Sprintf("%s.out", match)
		test_out, rerr := ioutil.ReadFile(test_filename)
		if rerr != nil {
			t.Fatalf("Error on ReadFile('%s'): %s", test_filename, rerr.Error())
		}
		tpl_out, err := tpl.ExecuteBytes(tplContext)
		if err != nil {
			t.Fatalf("Error on Execute('%s'): %s", match, err.Error())
		}
		if bytes.Compare(test_out, tpl_out) != 0 {
			t.Logf("Template (rendered) '%s': '%s'", match, tpl_out)
			err_filename := filepath.Base(fmt.Sprintf("%s.error", match))
			err := ioutil.WriteFile(err_filename, []byte(tpl_out), 0600)
			if err != nil {
				t.Fatalf(err.Error())
			}
			t.Logf("get a complete diff with command: 'diff -ya %s %s'", test_filename, err_filename)
			t.Errorf("Failed: test_out != tpl_out for %s", match)
		}
	}
}
+
+func TestExecutionErrors(t *testing.T) {
+ debug = true
+
+ matches, err := filepath.Glob("./template_tests/*-execution.err")
+ if err != nil {
+ t.Fatal(err)
+ }
+ for idx, match := range matches {
+ t.Logf("[Errors %3d] Testing '%s'", idx+1, match)
+
+ test_data, err := ioutil.ReadFile(match)
+ tests := strings.Split(string(test_data), "\n")
+
+ check_filename := fmt.Sprintf("%s.out", match)
+ check_data, err := ioutil.ReadFile(check_filename)
+ if err != nil {
+ t.Fatalf("Error on ReadFile('%s'): %s", check_filename, err.Error())
+ }
+ checks := strings.Split(string(check_data), "\n")
+
+ if len(checks) != len(tests) {
+ t.Fatal("Template lines != Checks lines")
+ }
+
+ for idx, test := range tests {
+ if strings.TrimSpace(test) == "" {
+ continue
+ }
+ if strings.TrimSpace(checks[idx]) == "" {
+ t.Fatalf("[%s Line %d] Check is empty (must contain an regular expression).",
+ match, idx+1)
+ }
+
+ tpl, err := FromString(test)
+ if err != nil {
+ t.Fatalf("Error on FromString('%s'): %s", test, err.Error())
+ }
+
+ _, err = tpl.ExecuteBytes(tplContext)
+ if err == nil {
+ t.Fatalf("[%s Line %d] Expected error for (got none): %s",
+ match, idx+1, tests[idx])
+ }
+
+ re := regexp.MustCompile(fmt.Sprintf("^%s$", checks[idx]))
+ if !re.MatchString(err.Error()) {
+ t.Fatalf("[%s Line %d] Error for '%s' (err = '%s') does not match the (regexp-)check: %s",
+ match, idx+1, test, err.Error(), checks[idx])
+ }
+ }
+ }
+}
+
+func TestCompilationErrors(t *testing.T) {
+ debug = true
+
+ matches, err := filepath.Glob("./template_tests/*-compilation.err")
+ if err != nil {
+ t.Fatal(err)
+ }
+ for idx, match := range matches {
+ t.Logf("[Errors %3d] Testing '%s'", idx+1, match)
+
+ test_data, err := ioutil.ReadFile(match)
+ tests := strings.Split(string(test_data), "\n")
+
+ check_filename := fmt.Sprintf("%s.out", match)
+ check_data, err := ioutil.ReadFile(check_filename)
+ if err != nil {
+ t.Fatalf("Error on ReadFile('%s'): %s", check_filename, err.Error())
+ }
+ checks := strings.Split(string(check_data), "\n")
+
+ if len(checks) != len(tests) {
+ t.Fatal("Template lines != Checks lines")
+ }
+
+ for idx, test := range tests {
+ if strings.TrimSpace(test) == "" {
+ continue
+ }
+ if strings.TrimSpace(checks[idx]) == "" {
+ t.Fatalf("[%s Line %d] Check is empty (must contain an regular expression).",
+ match, idx+1)
+ }
+
+ _, err = FromString(test)
+ if err == nil {
+ t.Fatalf("[%s | Line %d] Expected error for (got none): %s", match, idx+1, tests[idx])
+ }
+ re := regexp.MustCompile(fmt.Sprintf("^%s$", checks[idx]))
+ if !re.MatchString(err.Error()) {
+ t.Fatalf("[%s | Line %d] Error for '%s' (err = '%s') does not match the (regexp-)check: %s",
+ match, idx+1, test, err.Error(), checks[idx])
+ }
+ }
+ }
+}
+
// TestBaseDirectory verifies that templates inside a set's base directory
// can be loaded by path relative to that directory and render the
// expected marker string.
func TestBaseDirectory(t *testing.T) {
	mustStr := "Hello from template_tests/base_dir_test/"

	s := NewSet("test set with base directory")
	s.Globals["base_directory"] = "template_tests/base_dir_test/"
	if err := s.SetBaseDirectory(s.Globals["base_directory"].(string)); err != nil {
		t.Fatal(err)
	}

	matches, err := filepath.Glob("./template_tests/base_dir_test/subdir/*")
	if err != nil {
		t.Fatal(err)
	}
	for _, match := range matches {
		// Strip the base directory so the path is relative to the set.
		match = strings.Replace(match, "template_tests/base_dir_test/", "", -1)

		tpl, err := s.FromFile(match)
		if err != nil {
			t.Fatal(err)
		}
		out, err := tpl.Execute(nil)
		if err != nil {
			t.Fatal(err)
		}
		if out != mustStr {
			t.Errorf("%s: out ('%s') != mustStr ('%s')", match, out, mustStr)
		}
	}
}
+
// BenchmarkCache measures rendering when the template is fetched through
// the set's template cache on every iteration.
func BenchmarkCache(b *testing.B) {
	cache_set := NewSet("cache set")
	for i := 0; i < b.N; i++ {
		tpl, err := cache_set.FromCache("template_tests/complex.tpl")
		if err != nil {
			b.Fatal(err)
		}
		err = tpl.ExecuteWriterUnbuffered(tplContext, ioutil.Discard)
		if err != nil {
			b.Fatal(err)
		}
	}
}
+
// BenchmarkCacheDebugOn measures rendering with Debug enabled, which
// loads the template from disk (FromFile) on every iteration.
func BenchmarkCacheDebugOn(b *testing.B) {
	cache_debug_set := NewSet("cache set")
	cache_debug_set.Debug = true
	for i := 0; i < b.N; i++ {
		tpl, err := cache_debug_set.FromFile("template_tests/complex.tpl")
		if err != nil {
			b.Fatal(err)
		}
		err = tpl.ExecuteWriterUnbuffered(tplContext, ioutil.Discard)
		if err != nil {
			b.Fatal(err)
		}
	}
}
+
// BenchmarkExecuteComplexWithSandboxActive measures pure render time of a
// pre-compiled template on the default (sandboxed) set.
func BenchmarkExecuteComplexWithSandboxActive(b *testing.B) {
	tpl, err := FromFile("template_tests/complex.tpl")
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err = tpl.ExecuteWriterUnbuffered(tplContext, ioutil.Discard)
		if err != nil {
			b.Fatal(err)
		}
	}
}
+
// BenchmarkCompileAndExecuteComplexWithSandboxActive measures compile +
// render time per iteration on the default (sandboxed) set; the template
// source is read from disk once, outside the timed loop.
func BenchmarkCompileAndExecuteComplexWithSandboxActive(b *testing.B) {
	buf, err := ioutil.ReadFile("template_tests/complex.tpl")
	if err != nil {
		b.Fatal(err)
	}
	preloadedTpl := string(buf)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		tpl, err := FromString(preloadedTpl)
		if err != nil {
			b.Fatal(err)
		}

		err = tpl.ExecuteWriterUnbuffered(tplContext, ioutil.Discard)
		if err != nil {
			b.Fatal(err)
		}
	}
}
+
+func BenchmarkParallelExecuteComplexWithSandboxActive(b *testing.B) {
+ tpl, err := FromFile("template_tests/complex.tpl")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ err := tpl.ExecuteWriterUnbuffered(tplContext, ioutil.Discard)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
// BenchmarkExecuteComplexWithoutSandbox measures pure render time of a
// pre-compiled template on a fresh set without sandbox restrictions.
func BenchmarkExecuteComplexWithoutSandbox(b *testing.B) {
	s := NewSet("set without sandbox")
	tpl, err := s.FromFile("template_tests/complex.tpl")
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err = tpl.ExecuteWriterUnbuffered(tplContext, ioutil.Discard)
		if err != nil {
			b.Fatal(err)
		}
	}
}
+
// BenchmarkCompileAndExecuteComplexWithoutSandbox measures compile +
// render time per iteration on a set without sandbox restrictions; the
// template source is read from disk once, outside the timed loop.
func BenchmarkCompileAndExecuteComplexWithoutSandbox(b *testing.B) {
	buf, err := ioutil.ReadFile("template_tests/complex.tpl")
	if err != nil {
		b.Fatal(err)
	}
	preloadedTpl := string(buf)

	s := NewSet("set without sandbox")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		tpl, err := s.FromString(preloadedTpl)
		if err != nil {
			b.Fatal(err)
		}

		err = tpl.ExecuteWriterUnbuffered(tplContext, ioutil.Discard)
		if err != nil {
			b.Fatal(err)
		}
	}
}
+
+func BenchmarkParallelExecuteComplexWithoutSandbox(b *testing.B) {
+ s := NewSet("set without sandbox")
+ tpl, err := s.FromFile("template_tests/complex.tpl")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ err := tpl.ExecuteWriterUnbuffered(tplContext, ioutil.Discard)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2_test.go b/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2_test.go
new file mode 100644
index 0000000..5f54584
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2_test.go
@@ -0,0 +1,66 @@
+package pongo2
+
+import (
+ "testing"
+
+ . "gopkg.in/check.v1"
+)
+
+// Hook up gocheck into the "go test" runner.
+
+func Test(t *testing.T) { TestingT(t) }
+
+// TestSuite is the gocheck suite holding per-test template state.
+type TestSuite struct {
+	tpl *Template
+}
+
+var (
+	// Register the suite with gocheck.
+	_ = Suite(&TestSuite{})
+	// test_suite2 is a dedicated template set so these tests do not
+	// interfere with the package's default set.
+	test_suite2 = NewSet("test suite 2")
+)
+
+// parseTemplate compiles s in test_suite2 and renders it with c,
+// panicking on any compile or render error.
+func parseTemplate(s string, c Context) string {
+	tpl, err := test_suite2.FromString(s)
+	if err != nil {
+		panic(err)
+	}
+	rendered, err := tpl.Execute(c)
+	if err != nil {
+		panic(err)
+	}
+	return rendered
+}
+
+// parseTemplateFn wraps parseTemplate so it can be handed to gocheck's
+// panic matchers.
+func parseTemplateFn(s string, c Context) func() {
+	return func() { parseTemplate(s, c) }
+}
+
+// TestMisc exercises assorted top-level API behaviors: Must() panics,
+// context-key validation, duplicate filter/tag registration and
+// ApplyFilter() for both existing and missing filters.
+func (s *TestSuite) TestMisc(c *C) {
+	// Must
+	// TODO: Add better error message (see issue #18)
+	// base2.tpl references a non-existing template, so Must() must
+	// panic with an error pointing at doesnotexist.tpl.
+	c.Check(func() { Must(test_suite2.FromFile("template_tests/inheritance/base2.tpl")) },
+		PanicMatches,
+		`\[Error \(where: fromfile\) in template_tests/inheritance/doesnotexist.tpl | Line 1 Col 12 near 'doesnotexist.tpl'\] open template_tests/inheritance/doesnotexist.tpl: no such file or directory`)
+
+	// Context: keys must be valid identifiers.
+	c.Check(parseTemplateFn("", Context{"'illegal": nil}), PanicMatches, ".*not a valid identifier.*")
+
+	// Registers: re-registering built-ins must panic.
+	c.Check(func() { RegisterFilter("escape", nil) }, PanicMatches, ".*is already registered.*")
+	c.Check(func() { RegisterTag("for", nil) }, PanicMatches, ".*is already registered.*")
+
+	// ApplyFilter
+	v, err := ApplyFilter("title", AsValue("this is a title"), nil)
+	if err != nil {
+		c.Fatal(err)
+	}
+	c.Check(v.String(), Equals, "This Is A Title")
+	// An unknown filter name yields an error (re-raised here as a
+	// panic so PanicMatches can assert on the message).
+	c.Check(func() {
+		_, err := ApplyFilter("doesnotexist", nil, nil)
+		if err != nil {
+			panic(err)
+		}
+	}, PanicMatches, `\[Error \(where: applyfilter\)\] Filter with name 'doesnotexist' not found.`)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags.go
new file mode 100644
index 0000000..292c30d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags.go
@@ -0,0 +1,132 @@
+package pongo2
+
+/* Incomplete:
+ -----------
+
+ verbatim (only the "name" argument is missing for verbatim)
+
+ Reconsideration:
+ ----------------
+
+ debug (reason: not sure what to output yet)
+ regroup / Grouping on other properties (reason: maybe too python-specific; not sure how useful this would be in Go)
+
+ Following built-in tags wont be added:
+ --------------------------------------
+
+ csrf_token (reason: web-framework specific)
+ load (reason: python-specific)
+ url (reason: web-framework specific)
+*/
+
+import (
+ "fmt"
+)
+
+// INodeTag is the interface a tag node must implement; it is currently
+// identical to INode.
+type INodeTag interface {
+	INode
+}
+
+// This is the function signature of the tag's parser you will have
+// to implement in order to create a new tag.
+//
+// 'doc' is providing access to the whole document while 'arguments'
+// is providing access to the user's arguments to the tag:
+//
+//     {% your_tag_name some "arguments" 123 %}
+//
+// start_token will be the *Token with the tag's name in it (here: your_tag_name).
+//
+// Please see the Parser documentation on how to use the parser.
+// See RegisterTag()'s documentation for more information about
+// writing a tag as well.
+type TagParser func(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error)
+
+// tag couples a registered tag name with the parser that builds its node.
+type tag struct {
+	name   string
+	parser TagParser
+}
+
+var tags map[string]*tag
+
+func init() {
+ tags = make(map[string]*tag)
+}
+
+// Registers a new tag. If there's already a tag with the same
+// name, RegisterTag will panic. You usually want to call this
+// function in the tag's init() function:
+// http://golang.org/doc/effective_go.html#init
+//
+// See http://www.florian-schlachter.de/post/pongo2/ for more about
+// writing filters and tags.
+func RegisterTag(name string, parserFn TagParser) {
+	if _, found := tags[name]; found {
+		panic(fmt.Sprintf("Tag with name '%s' is already registered.", name))
+	}
+	tags[name] = &tag{name: name, parser: parserFn}
+}
+
+// Replaces an already registered tag with a new implementation. Use this
+// function with caution since it allows you to change existing tag behaviour.
+func ReplaceTag(name string, parserFn TagParser) {
+	if _, found := tags[name]; !found {
+		panic(fmt.Sprintf("Tag with name '%s' does not exist (therefore cannot be overridden).", name))
+	}
+	tags[name] = &tag{name: name, parser: parserFn}
+}
+
+// Tag = "{%" IDENT ARGS "%}"
+//
+// parseTagElement consumes one complete tag element, looks up the
+// registered tag implementation by name, enforces sandbox restrictions
+// and hands the collected argument tokens to the tag's own parser.
+func (p *Parser) parseTagElement() (INodeTag, *Error) {
+	p.Consume() // consume "{%"
+	token_name := p.MatchType(TokenIdentifier)
+
+	// Check for identifier
+	if token_name == nil {
+		return nil, p.Error("Tag name must be an identifier.", nil)
+	}
+
+	// Check for the existing tag
+	tag, exists := tags[token_name.Val]
+	if !exists {
+		// Does not exists
+		return nil, p.Error(fmt.Sprintf("Tag '%s' not found (or beginning tag not provided)", token_name.Val), token_name)
+	}
+
+	// Check sandbox tag restriction
+	if _, is_banned := p.template.set.bannedTags[token_name.Val]; is_banned {
+		return nil, p.Error(fmt.Sprintf("Usage of tag '%s' is not allowed (sandbox restriction active).", token_name.Val), token_name)
+	}
+
+	// Collect every token up to (but not including) the closing "%}".
+	args_token := make([]*Token, 0)
+	for p.Peek(TokenSymbol, "%}") == nil && p.Remaining() > 0 {
+		// Add token to args
+		args_token = append(args_token, p.Current())
+		p.Consume() // next token
+	}
+
+	// EOF?
+	if p.Remaining() == 0 {
+		return nil, p.Error("Unexpectedly reached EOF, no tag end found.", p.last_token)
+	}
+
+	p.Match(TokenSymbol, "%}")
+
+	// The tag parses its own arguments through a dedicated sub-parser.
+	arg_parser := newParser(p.name, args_token, p.template)
+	if len(args_token) == 0 {
+		// This is done to have nice EOF error messages
+		arg_parser.last_token = token_name
+	}
+
+	// Track nesting depth (used e.g. by 'extends' for its root-level check).
+	p.template.level++
+	defer func() { p.template.level-- }()
+	return tag.parser(p, token_name, arg_parser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_autoescape.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_autoescape.go
new file mode 100644
index 0000000..3cfc901
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_autoescape.go
@@ -0,0 +1,52 @@
+package pongo2
+
+// tagAutoescapeNode toggles the context's autoescape flag for the
+// duration of its wrapped body.
+type tagAutoescapeNode struct {
+	wrapper    *NodeWrapper
+	autoescape bool
+}
+
+// Execute temporarily switches ctx.Autoescape to the tag's mode, renders
+// the wrapped body and restores the previous setting afterwards. The
+// restore happens via defer so the flag is reset even when the body
+// returns an error (the previous code leaked the modified flag on error).
+func (node *tagAutoescapeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	old := ctx.Autoescape
+	ctx.Autoescape = node.autoescape
+	defer func() {
+		ctx.Autoescape = old
+	}()
+
+	return node.wrapper.Execute(ctx, writer)
+}
+
+// tagAutoescapeParser parses {% autoescape on|off %} ... {% endautoescape %}.
+func tagAutoescapeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	node := &tagAutoescapeNode{}
+
+	wrapper, _, err := doc.WrapUntilTag("endautoescape")
+	if err != nil {
+		return nil, err
+	}
+	node.wrapper = wrapper
+
+	mode := arguments.MatchType(TokenIdentifier)
+	if mode == nil {
+		return nil, arguments.Error("A mode is required for autoescape-tag.", nil)
+	}
+	switch mode.Val {
+	case "on":
+		node.autoescape = true
+	case "off":
+		node.autoescape = false
+	default:
+		return nil, arguments.Error("Only 'on' or 'off' is valid as an autoescape-mode.", nil)
+	}
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("Malformed autoescape-tag arguments.", nil)
+	}
+
+	return node, nil
+}
+
+func init() {
+	RegisterTag("autoescape", tagAutoescapeParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_block.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_block.go
new file mode 100644
index 0000000..c82bf3f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_block.go
@@ -0,0 +1,93 @@
+package pongo2
+
+import (
+ "fmt"
+)
+
+// tagBlockNode renders the named block, honoring overrides from child
+// templates (template inheritance).
+type tagBlockNode struct {
+	name string
+}
+
+// getBlockWrapperByName returns the most-derived definition of this
+// block: the deepest child template that defines it wins; otherwise the
+// current template's own definition (which may be nil).
+func (node *tagBlockNode) getBlockWrapperByName(tpl *Template) *NodeWrapper {
+	if tpl.child != nil {
+		// First ask the child for the block.
+		if w := node.getBlockWrapperByName(tpl.child); w != nil {
+			return w
+		}
+	}
+	// Child has no block (or there is no child): use this template's.
+	return tpl.blocks[node.name]
+}
+
+// Execute renders the most-derived override of this block; child
+// templates are consulted first, falling back to the current template.
+func (node *tagBlockNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	tpl := ctx.template
+	if tpl == nil {
+		panic("internal error: tpl == nil")
+	}
+
+	// Determine the block to execute.
+	blockWrapper := node.getBlockWrapperByName(tpl)
+	if blockWrapper == nil {
+		return ctx.Error("internal error: block_wrapper == nil in tagBlockNode.Execute()", nil)
+	}
+
+	// TODO: Add support for {{ block.super }}
+	return blockWrapper.Execute(ctx, writer)
+}
+
+// tagBlockParser parses {% block <name> %} ... {% endblock [<name>] %}
+// and registers the block's body on the owning template.
+func tagBlockParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	if arguments.Count() == 0 {
+		return nil, arguments.Error("Tag 'block' requires an identifier.", nil)
+	}
+
+	nameToken := arguments.MatchType(TokenIdentifier)
+	if nameToken == nil {
+		return nil, arguments.Error("First argument for tag 'block' must be an identifier.", nil)
+	}
+	if arguments.Remaining() != 0 {
+		return nil, arguments.Error("Tag 'block' takes exactly 1 argument (an identifier).", nil)
+	}
+
+	wrapper, endtagargs, err := doc.WrapUntilTag("endblock")
+	if err != nil {
+		return nil, err
+	}
+
+	// An optional identifier on {% endblock %} must repeat the name.
+	if endtagargs.Remaining() > 0 {
+		endtagnameToken := endtagargs.MatchType(TokenIdentifier)
+		if endtagnameToken != nil && endtagnameToken.Val != nameToken.Val {
+			return nil, endtagargs.Error(fmt.Sprintf("Name for 'endblock' must equal to 'block'-tag's name ('%s' != '%s').",
+				nameToken.Val, endtagnameToken.Val), nil)
+		}
+		if endtagnameToken == nil || endtagargs.Remaining() > 0 {
+			return nil, endtagargs.Error("Either no or only one argument (identifier) allowed for 'endblock'.", nil)
+		}
+	}
+
+	tpl := doc.template
+	if tpl == nil {
+		panic("internal error: tpl == nil")
+	}
+	if _, found := tpl.blocks[nameToken.Val]; found {
+		return nil, arguments.Error(fmt.Sprintf("Block named '%s' already defined", nameToken.Val), nil)
+	}
+	tpl.blocks[nameToken.Val] = wrapper
+
+	return &tagBlockNode{name: nameToken.Val}, nil
+}
+
+func init() {
+	RegisterTag("block", tagBlockParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_comment.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_comment.go
new file mode 100644
index 0000000..32f4aff
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_comment.go
@@ -0,0 +1,27 @@
+package pongo2
+
+// tagCommentNode is the no-op node for {% comment %} blocks.
+type tagCommentNode struct{}
+
+// Execute renders nothing: comment bodies are dropped entirely.
+func (node *tagCommentNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	return nil
+}
+
+// tagCommentParser consumes everything up to {% endcomment %} and
+// discards it.
+func tagCommentParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	// TODO: Process the endtag's arguments (see django 'comment'-tag documentation)
+	if _, _, err := doc.WrapUntilTag("endcomment"); err != nil {
+		return nil, err
+	}
+
+	if arguments.Count() != 0 {
+		return nil, arguments.Error("Tag 'comment' does not take any argument.", nil)
+	}
+
+	return &tagCommentNode{}, nil
+}
+
+func init() {
+	RegisterTag("comment", tagCommentParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_cycle.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_cycle.go
new file mode 100644
index 0000000..9aa1e8c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_cycle.go
@@ -0,0 +1,106 @@
+package pongo2
+
+// tagCycleValue is stored in the context when a cycle is bound via
+// "as <name>"; referencing it from another {% cycle %} advances the
+// bound cycle instead of starting a new one.
+type tagCycleValue struct {
+	node  *tagCycleNode
+	value *Value
+}
+
+// tagCycleNode holds the cycle's argument expressions plus its mutable
+// position (idx), which persists between renderings of the parsed node.
+type tagCycleNode struct {
+	position *Token
+	args     []IEvaluator
+	idx      int
+	as_name  string
+	silent   bool
+}
+
+// String renders the cycle's current value.
+func (cv *tagCycleValue) String() string {
+	return cv.value.String()
+}
+
+// Execute advances the cycle by one step and writes the resulting value
+// (unless silent). When the evaluated item is itself a bound cycle value
+// (created via "as"), that cycle's node is advanced instead.
+//
+// NOTE(review): idx is mutated on the node itself, so cycle state is
+// shared across renderings of the same parsed template — presumably not
+// safe for concurrent execution; verify against callers.
+func (node *tagCycleNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	// Pick the next argument (wrapping around) and advance the cycle.
+	item := node.args[node.idx%len(node.args)]
+	node.idx++
+
+	val, err := item.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+
+	if t, ok := val.Interface().(*tagCycleValue); ok {
+		// {% cycle "test1" "test2"
+		// {% cycle cycleitem %}
+
+		// Update the cycle value with next value
+		item := t.node.args[t.node.idx%len(t.node.args)]
+		t.node.idx++
+
+		val, err := item.Evaluate(ctx)
+		if err != nil {
+			return err
+		}
+
+		t.value = val
+
+		if !t.node.silent {
+			writer.WriteString(val.String())
+		}
+	} else {
+		// Regular call
+
+		cycle_value := &tagCycleValue{
+			node:  node,
+			value: val,
+		}
+
+		// When bound via "as", expose the cycle value in the context.
+		if node.as_name != "" {
+			ctx.Private[node.as_name] = cycle_value
+		}
+		if !node.silent {
+			writer.WriteString(val.String())
+		}
+	}
+
+	return nil
+}
+
+// HINT: We're not supporting the old comma-seperated list of expresions argument-style
+//
+// tagCycleParser parses {% cycle <expr>... [as <name> [silent]] %}.
+func tagCycleParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	node := &tagCycleNode{position: start}
+
+	for arguments.Remaining() > 0 {
+		expr, err := arguments.ParseExpression()
+		if err != nil {
+			return nil, err
+		}
+		node.args = append(node.args, expr)
+
+		// An optional trailing "as <name> [silent]" ends the list.
+		if arguments.MatchOne(TokenKeyword, "as") == nil {
+			continue
+		}
+		nameToken := arguments.MatchType(TokenIdentifier)
+		if nameToken == nil {
+			return nil, arguments.Error("Name (identifier) expected after 'as'.", nil)
+		}
+		node.as_name = nameToken.Val
+		if arguments.MatchOne(TokenIdentifier, "silent") != nil {
+			node.silent = true
+		}
+		break
+	}
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("Malformed cycle-tag.", nil)
+	}
+
+	return node, nil
+}
+
+func init() {
+	RegisterTag("cycle", tagCycleParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_extends.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_extends.go
new file mode 100644
index 0000000..b973632
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_extends.go
@@ -0,0 +1,52 @@
+package pongo2
+
+// tagExtendsNode records which parent template this template extends;
+// the actual inheritance wiring happens at parse time in tagExtendsParser.
+type tagExtendsNode struct {
+	filename string
+}
+
+// Execute is a no-op: 'extends' produces no render-time output.
+func (node *tagExtendsNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	return nil
+}
+
+// tagExtendsParser parses {% extends "<file>" %}: it compiles the named
+// parent template at parse time and links it with the current (child)
+// template. Only a static string filename is supported.
+func tagExtendsParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	node := &tagExtendsNode{}
+
+	if doc.template.level > 1 {
+		return nil, arguments.Error("The 'extends' tag can only defined on root level.", start)
+	}
+	if doc.template.parent != nil {
+		// Already one parent
+		return nil, arguments.Error("This template has already one parent.", start)
+	}
+
+	filenameToken := arguments.MatchType(TokenString)
+	if filenameToken == nil {
+		return nil, arguments.Error("Tag 'extends' requires a template filename as string.", nil)
+	}
+
+	// Resolve and compile the parent template.
+	parentFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
+	parentTemplate, err := doc.template.set.FromFile(parentFilename)
+	if err != nil {
+		return nil, err.(*Error)
+	}
+
+	// Link parent and child for block resolution.
+	parentTemplate.child = doc.template
+	doc.template.parent = parentTemplate
+	node.filename = parentFilename
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("Tag 'extends' does only take 1 argument.", nil)
+	}
+
+	return node, nil
+}
+
+func init() {
+	RegisterTag("extends", tagExtendsParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_filter.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_filter.go
new file mode 100644
index 0000000..da41835
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_filter.go
@@ -0,0 +1,95 @@
+package pongo2
+
+import (
+ "bytes"
+)
+
+// nodeFilterCall is one entry of the filter chain: a filter name plus an
+// optional parameter expression (nil when the filter takes no parameter).
+type nodeFilterCall struct {
+	name       string
+	param_expr IEvaluator
+}
+
+// tagFilterNode renders its body into a buffer and pipes the result
+// through the parsed filter chain.
+type tagFilterNode struct {
+	position    *Token
+	bodyWrapper *NodeWrapper
+	filterChain []*nodeFilterCall
+}
+
+// Execute renders the wrapped body into a temporary buffer, applies every
+// filter of the chain in order to the buffered output, and writes the
+// final result.
+func (node *tagFilterNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	temp := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB size
+
+	err := node.bodyWrapper.Execute(ctx, temp)
+	if err != nil {
+		return err
+	}
+
+	value := AsValue(temp.String())
+
+	for _, call := range node.filterChain {
+		var param *Value
+		if call.param_expr != nil {
+			// Evaluate the filter's parameter expression per rendering.
+			param, err = call.param_expr.Evaluate(ctx)
+			if err != nil {
+				return err
+			}
+		} else {
+			param = AsValue(nil)
+		}
+		value, err = ApplyFilter(call.name, value, param)
+		if err != nil {
+			// Attach the tag's own position to the filter error.
+			return ctx.Error(err.Error(), node.position)
+		}
+	}
+
+	writer.WriteString(value.String())
+
+	return nil
+}
+
+// tagFilterParser parses
+//     {% filter name[:param][|name2[:param2]...] %} ... {% endfilter %}
+func tagFilterParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	node := &tagFilterNode{position: start}
+
+	wrapper, _, err := doc.WrapUntilTag("endfilter")
+	if err != nil {
+		return nil, err
+	}
+	node.bodyWrapper = wrapper
+
+	for arguments.Remaining() > 0 {
+		call := &nodeFilterCall{}
+
+		nameToken := arguments.MatchType(TokenIdentifier)
+		if nameToken == nil {
+			return nil, arguments.Error("Expected a filter name (identifier).", nil)
+		}
+		call.name = nameToken.Val
+
+		if arguments.MatchOne(TokenSymbol, ":") != nil {
+			// Filter parameter
+			// NOTICE: we can't use ParseExpression() here, because it would parse the next filter "|..." as well in the argument list
+			expr, exprErr := arguments.parseVariableOrLiteral()
+			if exprErr != nil {
+				return nil, exprErr
+			}
+			call.param_expr = expr
+		}
+
+		node.filterChain = append(node.filterChain, call)
+
+		// The chain continues only after a "|" separator.
+		if arguments.MatchOne(TokenSymbol, "|") == nil {
+			break
+		}
+	}
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("Malformed filter-tag arguments.", nil)
+	}
+
+	return node, nil
+}
+
+func init() {
+	RegisterTag("filter", tagFilterParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_firstof.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_firstof.go
new file mode 100644
index 0000000..588abc0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_firstof.go
@@ -0,0 +1,49 @@
+package pongo2
+
+// tagFirstofNode emits the first of its arguments that evaluates truthy.
+type tagFirstofNode struct {
+	position *Token
+	args     []IEvaluator
+}
+
+// Execute writes the first truthy argument (HTML-escaped when autoescape
+// is active and no 'safe' filter was applied) and stops; with no truthy
+// argument nothing is written.
+func (node *tagFirstofNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	for _, arg := range node.args {
+		val, err := arg.Evaluate(ctx)
+		if err != nil {
+			return err
+		}
+		if !val.IsTrue() {
+			continue
+		}
+
+		if ctx.Autoescape && !arg.FilterApplied("safe") {
+			if val, err = ApplyFilter("escape", val, nil); err != nil {
+				return err
+			}
+		}
+
+		writer.WriteString(val.String())
+		return nil
+	}
+
+	return nil
+}
+
+// tagFirstofParser parses {% firstof <expr>... %}.
+func tagFirstofParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	node := &tagFirstofNode{position: start}
+
+	for arguments.Remaining() > 0 {
+		expr, err := arguments.ParseExpression()
+		if err != nil {
+			return nil, err
+		}
+		node.args = append(node.args, expr)
+	}
+
+	return node, nil
+}
+
+func init() {
+	RegisterTag("firstof", tagFirstofParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_for.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_for.go
new file mode 100644
index 0000000..7b280a1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_for.go
@@ -0,0 +1,154 @@
+package pongo2
+
+// tagForNode holds everything parsed from a {% for %} tag: the loop
+// variable name(s), the iterated expression, direction and the wrapped
+// body plus the optional {% empty %} fallback.
+type tagForNode struct {
+	key              string
+	value            string // only for maps: for key, value in map
+	object_evaluator IEvaluator
+	reversed         bool
+
+	bodyWrapper  *NodeWrapper
+	emptyWrapper *NodeWrapper // nil when the template has no {% empty %} block
+}
+
+// tagForLoopInformation is exposed to templates as "forloop" while the
+// loop body renders.
+type tagForLoopInformation struct {
+	Counter     int // 1-based iteration counter
+	Counter0    int // 0-based iteration counter
+	Revcounter  int
+	Revcounter0 int
+	First       bool
+	Last        bool
+	Parentloop  *tagForLoopInformation // set when loops are nested
+}
+
+// Execute renders the loop body once per item of the evaluated object,
+// exposing a "forloop" struct (and the parent loop, when nested) in a
+// child context; with nothing to iterate the optional {% empty %} block
+// is rendered instead.
+func (node *tagForNode) Execute(ctx *ExecutionContext, writer TemplateWriter) (forError *Error) {
+	// Backup forloop (as parentloop in public context), key-name and value-name
+	forCtx := NewChildExecutionContext(ctx)
+	parentloop := forCtx.Private["forloop"]
+
+	// Create loop struct
+	loopInfo := &tagForLoopInformation{
+		First: true,
+	}
+
+	// Is it a loop in a loop?
+	if parentloop != nil {
+		loopInfo.Parentloop = parentloop.(*tagForLoopInformation)
+	}
+
+	// Register loopInfo in public context
+	forCtx.Private["forloop"] = loopInfo
+
+	obj, err := node.object_evaluator.Evaluate(forCtx)
+	if err != nil {
+		return err
+	}
+
+	obj.IterateOrder(func(idx, count int, key, value *Value) bool {
+		// There's something to iterate over (correct type and at least 1 item)
+
+		// Update loop infos and public context
+		forCtx.Private[node.key] = key
+		if value != nil {
+			forCtx.Private[node.value] = value
+		}
+		loopInfo.Counter = idx + 1
+		loopInfo.Counter0 = idx
+		if idx == 1 {
+			loopInfo.First = false
+		}
+		if idx+1 == count {
+			loopInfo.Last = true
+		}
+		loopInfo.Revcounter = count - idx         // TODO: Not sure about this, have to look it up
+		loopInfo.Revcounter0 = count - (idx + 1)  // TODO: Not sure about this, have to look it up
+
+		// Render elements with updated context
+		err := node.bodyWrapper.Execute(forCtx, writer)
+		if err != nil {
+			forError = err
+			return false // stop iterating on the first body error
+		}
+		return true
+	}, func() {
+		// Nothing to iterate over (maybe wrong type or no items)
+		if node.emptyWrapper != nil {
+			err := node.emptyWrapper.Execute(forCtx, writer)
+			if err != nil {
+				forError = err
+			}
+		}
+	}, node.reversed)
+
+	// BUGFIX: propagate errors captured inside the iteration callbacks;
+	// the previous 'return nil' silently swallowed body/empty errors.
+	return forError
+}
+
+// tagForParser parses
+//     {% for <key>[, <value>] in <expr> [reversed] %} ... [{% empty %} ...] {% endfor %}
+func tagForParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	node := &tagForNode{}
+
+	keyToken := arguments.MatchType(TokenIdentifier)
+	if keyToken == nil {
+		return nil, arguments.Error("Expected an key identifier as first argument for 'for'-tag", nil)
+	}
+
+	// Optional value name for map iteration: "for key, value in map".
+	var valueToken *Token
+	if arguments.Match(TokenSymbol, ",") != nil {
+		valueToken = arguments.MatchType(TokenIdentifier)
+		if valueToken == nil {
+			return nil, arguments.Error("Value name must be an identifier.", nil)
+		}
+	}
+
+	if arguments.Match(TokenKeyword, "in") == nil {
+		return nil, arguments.Error("Expected keyword 'in'.", nil)
+	}
+
+	objectEvaluator, err := arguments.ParseExpression()
+	if err != nil {
+		return nil, err
+	}
+	node.object_evaluator = objectEvaluator
+	node.key = keyToken.Val
+	if valueToken != nil {
+		node.value = valueToken.Val
+	}
+
+	if arguments.MatchOne(TokenIdentifier, "reversed") != nil {
+		node.reversed = true
+	}
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("Malformed for-loop arguments.", nil)
+	}
+
+	// Wrap the loop body; it is terminated by {% empty %} or {% endfor %}.
+	wrapper, endargs, err := doc.WrapUntilTag("empty", "endfor")
+	if err != nil {
+		return nil, err
+	}
+	node.bodyWrapper = wrapper
+	if endargs.Count() > 0 {
+		return nil, endargs.Error("Arguments not allowed here.", nil)
+	}
+
+	if wrapper.Endtag == "empty" {
+		// There is an {% empty %} fallback; wrap it up to {% endfor %}.
+		wrapper, endargs, err = doc.WrapUntilTag("endfor")
+		if err != nil {
+			return nil, err
+		}
+		node.emptyWrapper = wrapper
+		if endargs.Count() > 0 {
+			return nil, endargs.Error("Arguments not allowed here.", nil)
+		}
+	}
+
+	return node, nil
+}
+
+func init() {
+	RegisterTag("for", tagForParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_if.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_if.go
new file mode 100644
index 0000000..5e21569
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_if.go
@@ -0,0 +1,77 @@
+package pongo2
+
+// tagIfNode holds the if/elif conditions and their corresponding bodies.
+// A trailing else body, when present, is one extra wrapper beyond the
+// last condition.
+type tagIfNode struct {
+	conditions []IEvaluator
+	wrappers   []*NodeWrapper
+}
+
+// Execute evaluates the conditions in order and renders the body of the
+// first truthy one; if none matches, the trailing else wrapper (if any)
+// is rendered.
+func (node *tagIfNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	for i, condition := range node.conditions {
+		result, err := condition.Evaluate(ctx)
+		if err != nil {
+			return err
+		}
+		if result.IsTrue() {
+			return node.wrappers[i].Execute(ctx, writer)
+		}
+		// After the last failed condition, fall through to else (if present).
+		if len(node.conditions) == i+1 && len(node.wrappers) > i+1 {
+			return node.wrappers[i+1].Execute(ctx, writer)
+		}
+	}
+	return nil
+}
+
+// tagIfParser parses {% if %} / {% elif %} / {% else %} / {% endif %}.
+func tagIfParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	node := &tagIfNode{}
+
+	// Parse first and main IF condition.
+	condition, err := arguments.ParseExpression()
+	if err != nil {
+		return nil, err
+	}
+	node.conditions = append(node.conditions, condition)
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("If-condition is malformed.", nil)
+	}
+
+	// Collect all remaining branches until {% endif %}.
+	for {
+		wrapper, tagArgs, err := doc.WrapUntilTag("elif", "else", "endif")
+		if err != nil {
+			return nil, err
+		}
+		node.wrappers = append(node.wrappers, wrapper)
+
+		switch wrapper.Endtag {
+		case "elif":
+			// elif can take a condition
+			elifCondition, err := tagArgs.ParseExpression()
+			if err != nil {
+				return nil, err
+			}
+			node.conditions = append(node.conditions, elifCondition)
+			if tagArgs.Remaining() > 0 {
+				return nil, tagArgs.Error("Elif-condition is malformed.", nil)
+			}
+		default:
+			// else/endif can't take any conditions
+			if tagArgs.Count() > 0 {
+				return nil, tagArgs.Error("Arguments not allowed here.", nil)
+			}
+		}
+
+		if wrapper.Endtag == "endif" {
+			break
+		}
+	}
+
+	return node, nil
+}
+
+func init() {
+	RegisterTag("if", tagIfParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifchanged.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifchanged.go
new file mode 100644
index 0000000..ce189c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifchanged.go
@@ -0,0 +1,117 @@
+package pongo2
+
+import (
+ "bytes"
+)
+
+// tagIfchangedNode keeps the previously rendered content / previously
+// evaluated values between renderings so changes can be detected.
+type tagIfchangedNode struct {
+	watched_expr []IEvaluator
+	last_values  []*Value
+	last_content []byte
+	thenWrapper  *NodeWrapper
+	elseWrapper  *NodeWrapper // nil when the template has no {% else %} block
+}
+
+// Execute renders the body when the watched state changed since the
+// previous rendering. With no watched expressions the rendered body
+// itself is compared against the previous output; otherwise the
+// expression values are compared and the optional else-block is rendered
+// when nothing changed.
+func (node *tagIfchangedNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	if len(node.watched_expr) == 0 {
+		// Check against own rendered body
+		buf := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
+		err := node.thenWrapper.Execute(ctx, buf)
+		if err != nil {
+			return err
+		}
+
+		buf_bytes := buf.Bytes()
+		if !bytes.Equal(node.last_content, buf_bytes) {
+			// Rendered content changed, output it
+			writer.Write(buf_bytes)
+			node.last_content = buf_bytes
+		}
+	} else {
+		now_values := make([]*Value, 0, len(node.watched_expr))
+		for _, expr := range node.watched_expr {
+			val, err := expr.Evaluate(ctx)
+			if err != nil {
+				return err
+			}
+			now_values = append(now_values, val)
+		}
+
+		// Compare old to new values now; the very first rendering always
+		// counts as "changed".
+		changed := len(node.last_values) == 0
+		for idx, old_val := range node.last_values {
+			if !old_val.EqualValueTo(now_values[idx]) {
+				changed = true
+				break // we can stop here because ONE value changed
+			}
+		}
+
+		node.last_values = now_values
+
+		if changed {
+			// Render thenWrapper
+			err := node.thenWrapper.Execute(ctx, writer)
+			if err != nil {
+				return err
+			}
+		} else if node.elseWrapper != nil {
+			// BUGFIX: elseWrapper is nil when the template has no
+			// {% else %} block; executing it unconditionally paniced
+			// on the first unchanged iteration.
+			err := node.elseWrapper.Execute(ctx, writer)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// tagIfchangedParser parses
+//     {% ifchanged [<expr>...] %} ... [{% else %} ...] {% endifchanged %}
+func tagIfchangedParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	node := &tagIfchangedNode{}
+
+	// Parse the (optional) list of watched expressions. The loop only
+	// terminates once all argument tokens are consumed, so the former
+	// trailing "malformed arguments" check was unreachable and has been
+	// removed.
+	for arguments.Remaining() > 0 {
+		expr, err := arguments.ParseExpression()
+		if err != nil {
+			return nil, err
+		}
+		node.watched_expr = append(node.watched_expr, expr)
+	}
+
+	// Wrap then/else-blocks
+	wrapper, endargs, err := doc.WrapUntilTag("else", "endifchanged")
+	if err != nil {
+		return nil, err
+	}
+	node.thenWrapper = wrapper
+	if endargs.Count() > 0 {
+		return nil, endargs.Error("Arguments not allowed here.", nil)
+	}
+
+	if wrapper.Endtag == "else" {
+		// There is an else-block; wrap it up to {% endifchanged %}.
+		wrapper, endargs, err = doc.WrapUntilTag("endifchanged")
+		if err != nil {
+			return nil, err
+		}
+		node.elseWrapper = wrapper
+		if endargs.Count() > 0 {
+			return nil, endargs.Error("Arguments not allowed here.", nil)
+		}
+	}
+
+	return node, nil
+}
+
+func init() {
+	RegisterTag("ifchanged", tagIfchangedParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifequal.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifequal.go
new file mode 100644
index 0000000..c60c5b5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifequal.go
@@ -0,0 +1,79 @@
+package pongo2
+
+// tagIfEqualNode renders its then-block when both expressions evaluate
+// to equal values, the optional else-block otherwise.
+type tagIfEqualNode struct {
+	var1, var2  IEvaluator
+	thenWrapper *NodeWrapper
+	elseWrapper *NodeWrapper
+}
+
+// Execute evaluates both operands and dispatches on equality.
+func (node *tagIfEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	left, err := node.var1.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+	right, err := node.var2.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+
+	if left.EqualValueTo(right) {
+		return node.thenWrapper.Execute(ctx, writer)
+	}
+	if node.elseWrapper != nil {
+		return node.elseWrapper.Execute(ctx, writer)
+	}
+	return nil
+}
+
+// tagIfEqualParser parses
+//     {% ifequal <a> <b> %} ... [{% else %} ...] {% endifequal %}
+func tagIfEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	node := &tagIfEqualNode{}
+
+	// Parse the two expressions to compare.
+	expr1, err := arguments.ParseExpression()
+	if err != nil {
+		return nil, err
+	}
+	expr2, err := arguments.ParseExpression()
+	if err != nil {
+		return nil, err
+	}
+	node.var1, node.var2 = expr1, expr2
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
+	}
+
+	// Wrap then/else-blocks
+	wrapper, endargs, err := doc.WrapUntilTag("else", "endifequal")
+	if err != nil {
+		return nil, err
+	}
+	node.thenWrapper = wrapper
+	if endargs.Count() > 0 {
+		return nil, endargs.Error("Arguments not allowed here.", nil)
+	}
+
+	if wrapper.Endtag == "else" {
+		// There is an else-block; wrap it up to {% endifequal %}.
+		wrapper, endargs, err = doc.WrapUntilTag("endifequal")
+		if err != nil {
+			return nil, err
+		}
+		node.elseWrapper = wrapper
+		if endargs.Count() > 0 {
+			return nil, endargs.Error("Arguments not allowed here.", nil)
+		}
+	}
+
+	return node, nil
+}
+
+func init() {
+	RegisterTag("ifequal", tagIfEqualParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifnotequal.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifnotequal.go
new file mode 100644
index 0000000..80d82cd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifnotequal.go
@@ -0,0 +1,79 @@
+package pongo2
+
+// tagIfNotEqualNode renders its then-block when the two expressions
+// evaluate to different values, the optional else-block otherwise.
+type tagIfNotEqualNode struct {
+	var1, var2  IEvaluator
+	thenWrapper *NodeWrapper
+	elseWrapper *NodeWrapper
+}
+
+// Execute evaluates both operands and dispatches on inequality.
+func (node *tagIfNotEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	left, err := node.var1.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+	right, err := node.var2.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+
+	if !left.EqualValueTo(right) {
+		return node.thenWrapper.Execute(ctx, writer)
+	}
+	if node.elseWrapper != nil {
+		return node.elseWrapper.Execute(ctx, writer)
+	}
+	return nil
+}
+
+// tagIfNotEqualParser parses
+//     {% ifnotequal <a> <b> %} ... [{% else %} ...] {% endifnotequal %}
+func tagIfNotEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	ifnotequal_node := &tagIfNotEqualNode{}
+
+	// Parse two expressions
+	var1, err := arguments.ParseExpression()
+	if err != nil {
+		return nil, err
+	}
+	var2, err := arguments.ParseExpression()
+	if err != nil {
+		return nil, err
+	}
+	ifnotequal_node.var1 = var1
+	ifnotequal_node.var2 = var2
+
+	if arguments.Remaining() > 0 {
+		// BUGFIX: the message previously named the wrong tag ("ifequal").
+		return nil, arguments.Error("ifnotequal only takes 2 arguments.", nil)
+	}
+
+	// Wrap then/else-blocks
+	// BUGFIX: this previously wrapped until "endifequal" (copy-paste from
+	// tags_ifequal.go); the Django-compatible end tag is "endifnotequal".
+	wrapper, endargs, err := doc.WrapUntilTag("else", "endifnotequal")
+	if err != nil {
+		return nil, err
+	}
+	ifnotequal_node.thenWrapper = wrapper
+
+	if endargs.Count() > 0 {
+		return nil, endargs.Error("Arguments not allowed here.", nil)
+	}
+
+	if wrapper.Endtag == "else" {
+		// if there's an else in the if-statement, we need the else-Block as well
+		wrapper, endargs, err = doc.WrapUntilTag("endifnotequal")
+		if err != nil {
+			return nil, err
+		}
+		ifnotequal_node.elseWrapper = wrapper
+
+		if endargs.Count() > 0 {
+			return nil, endargs.Error("Arguments not allowed here.", nil)
+		}
+	}
+
+	return ifnotequal_node, nil
+}
+
+func init() {
+	RegisterTag("ifnotequal", tagIfNotEqualParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_import.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_import.go
new file mode 100644
index 0000000..76a7ca2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_import.go
@@ -0,0 +1,85 @@
+package pongo2
+
+import (
+ "fmt"
+)
+
+type tagImportNode struct {
+ position *Token
+ filename string
+ template *Template
+ macros map[string]*tagMacroNode // alias/name -> macro instance
+}
+
+// Execute registers every imported macro as a callable in the current
+// private context, so {{ name(...) }} invokes the macro afterwards.
+func (node *tagImportNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	for name, macro := range node.macros {
+		// The immediately-invoked closure captures per-iteration copies of
+		// name/macro; without it all registered functions would share the
+		// loop variables (Go versions before 1.22).
+		func(name string, macro *tagMacroNode) {
+			ctx.Private[name] = func(args ...*Value) *Value {
+				return macro.call(ctx, args...)
+			}
+		}(name, macro)
+	}
+	return nil
+}
+
+// tagImportParser parses
+// {% import "file.tpl" macro1 [as alias1][, macro2 [as alias2], ...] %}.
+// The referenced template is compiled at parse time and the listed exported
+// macros are collected (by alias where given) for registration on Execute.
+func tagImportParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	import_node := &tagImportNode{
+		position: start,
+		macros:   make(map[string]*tagMacroNode),
+	}
+
+	filename_token := arguments.MatchType(TokenString)
+	if filename_token == nil {
+		return nil, arguments.Error("Import-tag needs a filename as string.", nil)
+	}
+
+	import_node.filename = doc.template.set.resolveFilename(doc.template, filename_token.Val)
+
+	if arguments.Remaining() == 0 {
+		return nil, arguments.Error("You must at least specify one macro to import.", nil)
+	}
+
+	// Compile the given template
+	tpl, err := doc.template.set.FromFile(import_node.filename)
+	if err != nil {
+		return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, start)
+	}
+
+	// Consume "name [as alias]" entries separated by commas until the
+	// argument list is exhausted.
+	for arguments.Remaining() > 0 {
+		macro_name_token := arguments.MatchType(TokenIdentifier)
+		if macro_name_token == nil {
+			return nil, arguments.Error("Expected macro name (identifier).", nil)
+		}
+
+		as_name := macro_name_token.Val
+		if arguments.Match(TokenKeyword, "as") != nil {
+			alias_token := arguments.MatchType(TokenIdentifier)
+			if alias_token == nil {
+				return nil, arguments.Error("Expected macro alias name (identifier).", nil)
+			}
+			as_name = alias_token.Val
+		}
+
+		// Only macros marked "export" in the imported template are visible.
+		macro_instance, has := tpl.exported_macros[macro_name_token.Val]
+		if !has {
+			return nil, arguments.Error(fmt.Sprintf("Macro '%s' not found (or not exported) in '%s'.", macro_name_token.Val,
+				import_node.filename), macro_name_token)
+		}
+
+		import_node.macros[as_name] = macro_instance
+
+		if arguments.Remaining() == 0 {
+			break
+		}
+
+		if arguments.Match(TokenSymbol, ",") == nil {
+			return nil, arguments.Error("Expected ','.", nil)
+		}
+	}
+
+	return import_node, nil
+}
+
+func init() {
+ RegisterTag("import", tagImportParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_include.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_include.go
new file mode 100644
index 0000000..24313fc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_include.go
@@ -0,0 +1,147 @@
+package pongo2
+
+type tagIncludeNode struct {
+ tpl *Template
+ filename_evaluator IEvaluator
+ lazy bool
+ only bool
+ filename string
+ with_pairs map[string]IEvaluator
+ if_exists bool
+}
+
+// Execute renders the included template into writer. Unless "only" was
+// given, the parent's public and private context is copied in first, then
+// any with-pairs are evaluated on top. For lazy includes the filename
+// expression is evaluated and the template compiled at execution time;
+// otherwise the template pre-compiled at parse time is used.
+func (node *tagIncludeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	// Building the context for the template
+	include_ctx := make(Context)
+
+	// Fill the context with all data from the parent
+	if !node.only {
+		include_ctx.Update(ctx.Public)
+		include_ctx.Update(ctx.Private)
+	}
+
+	// Put all custom with-pairs into the context
+	for key, value := range node.with_pairs {
+		val, err := value.Evaluate(ctx)
+		if err != nil {
+			return err
+		}
+		include_ctx[key] = val
+	}
+
+	// Execute the template
+	if node.lazy {
+		// Evaluate the filename
+		filename, err := node.filename_evaluator.Evaluate(ctx)
+		if err != nil {
+			return err
+		}
+
+		if filename.String() == "" {
+			return ctx.Error("Filename for 'include'-tag evaluated to an empty string.", nil)
+		}
+
+		// Get include-filename
+		included_filename := ctx.template.set.resolveFilename(ctx.template, filename.String())
+
+		included_tpl, err2 := ctx.template.set.FromFile(included_filename)
+		if err2 != nil {
+			// if this is ReadFile error, and "if_exists" flag is enabled
+			// (Sender "fromfile" appears to mark read failures — a missing
+			// file is then silently ignored.)
+			if node.if_exists && err2.(*Error).Sender == "fromfile" {
+				return nil
+			}
+			return err2.(*Error)
+		}
+		err2 = included_tpl.ExecuteWriter(include_ctx, writer)
+		if err2 != nil {
+			return err2.(*Error)
+		}
+		return nil
+	} else {
+		// Template is already parsed with static filename
+		err := node.tpl.ExecuteWriter(include_ctx, writer)
+		if err != nil {
+			return err.(*Error)
+		}
+		return nil
+	}
+}
+
+type tagIncludeEmptyNode struct{}
+
+func (node *tagIncludeEmptyNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ return nil
+}
+
+// tagIncludeParser parses
+// {% include "file.tpl"|expr [if_exists] [with k=v ...] [only] %}.
+// A string literal filename is compiled eagerly at parse time; any other
+// expression enables lazy per-execution evaluation.
+func tagIncludeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	include_node := &tagIncludeNode{
+		with_pairs: make(map[string]IEvaluator),
+	}
+
+	if filename_token := arguments.MatchType(TokenString); filename_token != nil {
+		// prepared, static template
+
+		// "if_exists" flag
+		if_exists := arguments.Match(TokenIdentifier, "if_exists") != nil
+
+		// Get include-filename
+		included_filename := doc.template.set.resolveFilename(doc.template, filename_token.Val)
+
+		// Parse the parent
+		include_node.filename = included_filename
+		included_tpl, err := doc.template.set.FromFile(included_filename)
+		if err != nil {
+			// if this is ReadFile error, and "if_exists" token presents we should create and empty node
+			if err.(*Error).Sender == "fromfile" && if_exists {
+				return &tagIncludeEmptyNode{}, nil
+			}
+			return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, filename_token)
+		}
+		include_node.tpl = included_tpl
+	} else {
+		// No String, then the user wants to use lazy-evaluation (slower, but possible)
+		filename_evaluator, err := arguments.ParseExpression()
+		if err != nil {
+			// NOTE(review): filename_token is nil in this branch; this relies
+			// on updateFromTokenIfNeeded tolerating a nil token — TODO confirm.
+			return nil, err.updateFromTokenIfNeeded(doc.template, filename_token)
+		}
+		include_node.filename_evaluator = filename_evaluator
+		include_node.lazy = true
+		include_node.if_exists = arguments.Match(TokenIdentifier, "if_exists") != nil // "if_exists" flag
+	}
+
+	// After having parsed the filename we're gonna parse the with+only options
+	if arguments.Match(TokenIdentifier, "with") != nil {
+		for arguments.Remaining() > 0 {
+			// We have at least one key=expr pair (because of starting "with")
+			key_token := arguments.MatchType(TokenIdentifier)
+			if key_token == nil {
+				return nil, arguments.Error("Expected an identifier", nil)
+			}
+			if arguments.Match(TokenSymbol, "=") == nil {
+				return nil, arguments.Error("Expected '='.", nil)
+			}
+			value_expr, err := arguments.ParseExpression()
+			if err != nil {
+				return nil, err.updateFromTokenIfNeeded(doc.template, key_token)
+			}
+
+			include_node.with_pairs[key_token.Val] = value_expr
+
+			// Only?
+			if arguments.Match(TokenIdentifier, "only") != nil {
+				include_node.only = true
+				break // stop parsing arguments because it's the last option
+			}
+		}
+	}
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("Malformed 'include'-tag arguments.", nil)
+	}
+
+	return include_node, nil
+}
+
+func init() {
+ RegisterTag("include", tagIncludeParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_lorem.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_lorem.go
new file mode 100644
index 0000000..ad56f28
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_lorem.go
@@ -0,0 +1,131 @@
+package pongo2
+
+import (
+ "math/rand"
+ "strings"
+ "time"
+)
+
+var (
+ tagLoremParagraphs = strings.Split(tagLoremText, "\n")
+ tagLoremWords = strings.Fields(tagLoremText)
+)
+
+type tagLoremNode struct {
+ position *Token
+ count int // number of paragraphs
+ method string // w = words, p = HTML paragraphs, b = plain-text (default is b)
+ random bool // does not use the default paragraph "Lorem ipsum dolor sit amet, ..."
+}
+
+func (node *tagLoremNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ switch node.method {
+ case "b":
+ if node.random {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
+ writer.WriteString(par)
+ }
+ } else {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
+ writer.WriteString(par)
+ }
+ }
+ case "w":
+ if node.random {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString(" ")
+ }
+ word := tagLoremWords[rand.Intn(len(tagLoremWords))]
+ writer.WriteString(word)
+ }
+ } else {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString(" ")
+ }
+ word := tagLoremWords[i%len(tagLoremWords)]
+ writer.WriteString(word)
+ }
+ }
+ case "p":
+ if node.random {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ writer.WriteString("")
+ par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
+ writer.WriteString(par)
+ writer.WriteString("
")
+ }
+ } else {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ writer.WriteString("")
+ par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
+ writer.WriteString(par)
+ writer.WriteString("
")
+
+ }
+ }
+ default:
+ panic("unsupported method")
+ }
+
+ return nil
+}
+
+// tagLoremParser parses {% lorem [count] [w|p|b] [random] %}.
+// All arguments are optional; defaults are count=1 and method "b"
+// (plain-text paragraphs).
+func tagLoremParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	lorem_node := &tagLoremNode{
+		position: start,
+		count:    1,
+		method:   "b",
+	}
+
+	// Optional item count (number token)
+	if count_token := arguments.MatchType(TokenNumber); count_token != nil {
+		lorem_node.count = AsValue(count_token.Val).Integer()
+	}
+
+	// Optional output method: words, HTML paragraphs or plain text
+	if method_token := arguments.MatchType(TokenIdentifier); method_token != nil {
+		if method_token.Val != "w" && method_token.Val != "p" && method_token.Val != "b" {
+			return nil, arguments.Error("lorem-method must be either 'w', 'p' or 'b'.", nil)
+		}
+
+		lorem_node.method = method_token.Val
+	}
+
+	// Optional "random" flag for randomized selection
+	if arguments.MatchOne(TokenIdentifier, "random") != nil {
+		lorem_node.random = true
+	}
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("Malformed lorem-tag arguments.", nil)
+	}
+
+	return lorem_node, nil
+}
+
+func init() {
+ rand.Seed(time.Now().Unix())
+
+ RegisterTag("lorem", tagLoremParser)
+}
+
+const tagLoremText = `Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
+Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
+Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
+Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.
+At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, At accusam aliquyam diam diam dolore dolores duo eirmod eos erat, et nonumy sed tempor et et invidunt justo labore Stet clita ea et gubergren, kasd magna no rebum. sanctus sea sed takimata ut vero voluptua. est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat.
+Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.`
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_macro.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_macro.go
new file mode 100644
index 0000000..fdda8f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_macro.go
@@ -0,0 +1,149 @@
+package pongo2
+
+import (
+ "bytes"
+ "fmt"
+)
+
+type tagMacroNode struct {
+ position *Token
+ name string
+ args_order []string
+ args map[string]IEvaluator
+ exported bool
+
+ wrapper *NodeWrapper
+}
+
+// Execute does not render anything itself; it only registers the macro as a
+// callable function under its name in the private context, so subsequent
+// {{ name(...) }} expressions can invoke it via call().
+func (node *tagMacroNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	ctx.Private[node.name] = func(args ...*Value) *Value {
+		return node.call(ctx, args...)
+	}
+
+	return nil
+}
+
+// call invokes the macro with the given positional arguments: defaults are
+// evaluated first, positional args then override them in declaration order,
+// the wrapped body is executed in a child context and its output returned as
+// a safe value. Errors are logged and returned as the rendered text, since
+// the value-function signature cannot propagate them.
+func (node *tagMacroNode) call(ctx *ExecutionContext, args ...*Value) *Value {
+	args_ctx := make(Context)
+
+	// Seed the argument context with declared defaults (or nil).
+	for k, v := range node.args {
+		if v == nil {
+			// User did not provided a default value
+			args_ctx[k] = nil
+		} else {
+			// Evaluate the default value
+			value_expr, err := v.Evaluate(ctx)
+			if err != nil {
+				ctx.Logf(err.Error())
+				return AsSafeValue(err.Error())
+			}
+
+			args_ctx[k] = value_expr
+		}
+	}
+
+	if len(args) > len(node.args_order) {
+		// Too many arguments, we're ignoring them and just logging into debug mode.
+		err := ctx.Error(fmt.Sprintf("Macro '%s' called with too many arguments (%d instead of %d).",
+			node.name, len(args), len(node.args_order)), nil).updateFromTokenIfNeeded(ctx.template, node.position)
+
+		ctx.Logf(err.Error()) // TODO: This is a workaround, because the error is not returned yet to the Execution()-methods
+		return AsSafeValue(err.Error())
+	}
+
+	// Make a context for the macro execution
+	macroCtx := NewChildExecutionContext(ctx)
+
+	// Register all arguments in the private context
+	macroCtx.Private.Update(args_ctx)
+
+	// Positional arguments override defaults, matched by declaration order.
+	for idx, arg_value := range args {
+		macroCtx.Private[node.args_order[idx]] = arg_value.Interface()
+	}
+
+	// Render the macro body into a buffer and hand it back as safe output.
+	var b bytes.Buffer
+	err := node.wrapper.Execute(macroCtx, &b)
+	if err != nil {
+		return AsSafeValue(err.updateFromTokenIfNeeded(ctx.template, node.position).Error())
+	}
+
+	return AsSafeValue(b.String())
+}
+
+// tagMacroParser parses
+// {% macro name(arg1[=default], ...) [export] %} body {% endmacro %}.
+// Exported macros are additionally registered on the template so they can be
+// pulled in by the import-tag.
+func tagMacroParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	macro_node := &tagMacroNode{
+		position: start,
+		args:     make(map[string]IEvaluator),
+	}
+
+	name_token := arguments.MatchType(TokenIdentifier)
+	if name_token == nil {
+		return nil, arguments.Error("Macro-tag needs at least an identifier as name.", nil)
+	}
+	macro_node.name = name_token.Val
+
+	if arguments.MatchOne(TokenSymbol, "(") == nil {
+		return nil, arguments.Error("Expected '('.", nil)
+	}
+
+	// Argument list: identifiers with optional "=default" expressions,
+	// comma-separated, closed by ')'.
+	for arguments.Match(TokenSymbol, ")") == nil {
+		arg_name_token := arguments.MatchType(TokenIdentifier)
+		if arg_name_token == nil {
+			return nil, arguments.Error("Expected argument name as identifier.", nil)
+		}
+		macro_node.args_order = append(macro_node.args_order, arg_name_token.Val)
+
+		if arguments.Match(TokenSymbol, "=") != nil {
+			// Default expression follows
+			arg_default_expr, err := arguments.ParseExpression()
+			if err != nil {
+				return nil, err
+			}
+			macro_node.args[arg_name_token.Val] = arg_default_expr
+		} else {
+			// No default expression
+			macro_node.args[arg_name_token.Val] = nil
+		}
+
+		if arguments.Match(TokenSymbol, ")") != nil {
+			break
+		}
+		if arguments.Match(TokenSymbol, ",") == nil {
+			return nil, arguments.Error("Expected ',' or ')'.", nil)
+		}
+	}
+
+	// Optional "export" keyword makes the macro importable elsewhere.
+	if arguments.Match(TokenKeyword, "export") != nil {
+		macro_node.exported = true
+	}
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("Malformed macro-tag.", nil)
+	}
+
+	// Body wrapping
+	wrapper, endargs, err := doc.WrapUntilTag("endmacro")
+	if err != nil {
+		return nil, err
+	}
+	macro_node.wrapper = wrapper
+
+	if endargs.Count() > 0 {
+		return nil, endargs.Error("Arguments not allowed here.", nil)
+	}
+
+	if macro_node.exported {
+		// Now register the macro if it wants to be exported
+		_, has := doc.template.exported_macros[macro_node.name]
+		if has {
+			return nil, doc.Error(fmt.Sprintf("Another macro with name '%s' already exported.", macro_node.name), start)
+		}
+		doc.template.exported_macros[macro_node.name] = macro_node
+	}
+
+	return macro_node, nil
+}
+
+func init() {
+ RegisterTag("macro", tagMacroParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_now.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_now.go
new file mode 100644
index 0000000..8e6ab32
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_now.go
@@ -0,0 +1,50 @@
+package pongo2
+
+import (
+ "time"
+)
+
+type tagNowNode struct {
+ position *Token
+ format string
+ fake bool
+}
+
+func (node *tagNowNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ var t time.Time
+ if node.fake {
+ t = time.Date(2014, time.February, 05, 18, 31, 45, 00, time.UTC)
+ } else {
+ t = time.Now()
+ }
+
+ writer.WriteString(t.Format(node.format))
+
+ return nil
+}
+
+func tagNowParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ now_node := &tagNowNode{
+ position: start,
+ }
+
+ format_token := arguments.MatchType(TokenString)
+ if format_token == nil {
+ return nil, arguments.Error("Expected a format string.", nil)
+ }
+ now_node.format = format_token.Val
+
+ if arguments.MatchOne(TokenIdentifier, "fake") != nil {
+ now_node.fake = true
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed now-tag arguments.", nil)
+ }
+
+ return now_node, nil
+}
+
+func init() {
+ RegisterTag("now", tagNowParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_set.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_set.go
new file mode 100644
index 0000000..be121c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_set.go
@@ -0,0 +1,50 @@
+package pongo2
+
+type tagSetNode struct {
+ name string
+ expression IEvaluator
+}
+
+// Execute evaluates the set-tag's expression and stores the result under the
+// configured name in the private context. Nothing is written to the output.
+func (node *tagSetNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	// Evaluate expression
+	value, err := node.expression.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+
+	ctx.Private[node.name] = value
+	return nil
+}
+
+func tagSetParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ node := &tagSetNode{}
+
+ // Parse variable name
+ typeToken := arguments.MatchType(TokenIdentifier)
+ if typeToken == nil {
+ return nil, arguments.Error("Expected an identifier.", nil)
+ }
+ node.name = typeToken.Val
+
+ if arguments.Match(TokenSymbol, "=") == nil {
+ return nil, arguments.Error("Expected '='.", nil)
+ }
+
+ // Variable expression
+ keyExpression, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ node.expression = keyExpression
+
+ // Remaining arguments
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed 'set'-tag arguments.", nil)
+ }
+
+ return node, nil
+}
+
+func init() {
+ RegisterTag("set", tagSetParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_spaceless.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_spaceless.go
new file mode 100644
index 0000000..405ff24
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_spaceless.go
@@ -0,0 +1,54 @@
+package pongo2
+
+import (
+ "bytes"
+ "regexp"
+)
+
+type tagSpacelessNode struct {
+ wrapper *NodeWrapper
+}
+
+var tagSpacelessRegexp = regexp.MustCompile(`(?U:(<.*>))([\t\n\v\f\r ]+)(?U:(<.*>))`)
+
+// Execute renders the wrapped block into a buffer and strips whitespace
+// between adjacent HTML tags before writing the result to the output.
+func (node *tagSpacelessNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	b := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
+
+	err := node.wrapper.Execute(ctx, b)
+	if err != nil {
+		return err
+	}
+
+	s := b.String()
+	// Repeat this recursively
+	// (a single ReplaceAll pass cannot collapse overlapping tag pairs, so
+	// loop until the string stops changing)
+	changed := true
+	for changed {
+		s2 := tagSpacelessRegexp.ReplaceAllString(s, "$1$3")
+		changed = s != s2
+		s = s2
+	}
+
+	writer.WriteString(s)
+
+	return nil
+}
+
+func tagSpacelessParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ spaceless_node := &tagSpacelessNode{}
+
+ wrapper, _, err := doc.WrapUntilTag("endspaceless")
+ if err != nil {
+ return nil, err
+ }
+ spaceless_node.wrapper = wrapper
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed spaceless-tag arguments.", nil)
+ }
+
+ return spaceless_node, nil
+}
+
+func init() {
+ RegisterTag("spaceless", tagSpacelessParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ssi.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ssi.go
new file mode 100644
index 0000000..71ba683
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ssi.go
@@ -0,0 +1,68 @@
+package pongo2
+
+import (
+ "io/ioutil"
+)
+
+type tagSSINode struct {
+ filename string
+ content string
+ template *Template
+}
+
+func (node *tagSSINode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ if node.template != nil {
+ // Execute the template within the current context
+ includeCtx := make(Context)
+ includeCtx.Update(ctx.Public)
+ includeCtx.Update(ctx.Private)
+
+ err := node.template.execute(includeCtx, writer)
+ if err != nil {
+ return err.(*Error)
+ }
+ } else {
+ // Just print out the content
+ writer.WriteString(node.content)
+ }
+ return nil
+}
+
+func tagSSIParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ssi_node := &tagSSINode{}
+
+ if file_token := arguments.MatchType(TokenString); file_token != nil {
+ ssi_node.filename = file_token.Val
+
+ if arguments.Match(TokenIdentifier, "parsed") != nil {
+ // parsed
+ temporary_tpl, err := doc.template.set.FromFile(doc.template.set.resolveFilename(doc.template, file_token.Val))
+ if err != nil {
+ return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, file_token)
+ }
+ ssi_node.template = temporary_tpl
+ } else {
+ // plaintext
+ buf, err := ioutil.ReadFile(doc.template.set.resolveFilename(doc.template, file_token.Val))
+ if err != nil {
+ return nil, (&Error{
+ Sender: "tag:ssi",
+ ErrorMsg: err.Error(),
+ }).updateFromTokenIfNeeded(doc.template, file_token)
+ }
+ ssi_node.content = string(buf)
+ }
+ } else {
+ return nil, arguments.Error("First argument must be a string.", nil)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed SSI-tag argument.", nil)
+ }
+
+ return ssi_node, nil
+}
+
+func init() {
+ RegisterTag("ssi", tagSSIParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_templatetag.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_templatetag.go
new file mode 100644
index 0000000..16e1159
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_templatetag.go
@@ -0,0 +1,45 @@
+package pongo2
+
+type tagTemplateTagNode struct {
+ content string
+}
+
+var templateTagMapping = map[string]string{
+ "openblock": "{%",
+ "closeblock": "%}",
+ "openvariable": "{{",
+ "closevariable": "}}",
+ "openbrace": "{",
+ "closebrace": "}",
+ "opencomment": "{#",
+ "closecomment": "#}",
+}
+
+func (node *tagTemplateTagNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ writer.WriteString(node.content)
+ return nil
+}
+
+func tagTemplateTagParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ tt_node := &tagTemplateTagNode{}
+
+ if arg_token := arguments.MatchType(TokenIdentifier); arg_token != nil {
+ output, found := templateTagMapping[arg_token.Val]
+ if !found {
+ return nil, arguments.Error("Argument not found", arg_token)
+ }
+ tt_node.content = output
+ } else {
+ return nil, arguments.Error("Identifier expected.", nil)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed templatetag-tag argument.", nil)
+ }
+
+ return tt_node, nil
+}
+
+func init() {
+ RegisterTag("templatetag", tagTemplateTagParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_widthratio.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_widthratio.go
new file mode 100644
index 0000000..122d6a1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_widthratio.go
@@ -0,0 +1,83 @@
+package pongo2
+
+import (
+ "fmt"
+ "math"
+)
+
+type tagWidthratioNode struct {
+ position *Token
+ current, max IEvaluator
+ width IEvaluator
+ ctx_name string
+}
+
+// Execute computes current/max*width as an integer ratio and either writes
+// it to the output or, when "as name" was given, stores it in the private
+// context instead.
+func (node *tagWidthratioNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	current, err := node.current.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+
+	max, err := node.max.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+
+	width, err := node.width.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+
+	// NOTE(review): Ceil(x + 0.5) looks intended as round-half-up but is
+	// effectively ceil shifted by one for fractional inputs — confirm this
+	// matches the reference implementation's rounding before changing it.
+	value := int(math.Ceil(current.Float()/max.Float()*width.Float() + 0.5))
+
+	if node.ctx_name == "" {
+		writer.WriteString(fmt.Sprintf("%d", value))
+	} else {
+		ctx.Private[node.ctx_name] = value
+	}
+
+	return nil
+}
+
+func tagWidthratioParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ widthratio_node := &tagWidthratioNode{
+ position: start,
+ }
+
+ current, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ widthratio_node.current = current
+
+ max, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ widthratio_node.max = max
+
+ width, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ widthratio_node.width = width
+
+ if arguments.MatchOne(TokenKeyword, "as") != nil {
+ // Name follows
+ name_token := arguments.MatchType(TokenIdentifier)
+ if name_token == nil {
+ return nil, arguments.Error("Expected name (identifier).", nil)
+ }
+ widthratio_node.ctx_name = name_token.Val
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed widthratio-tag arguments.", nil)
+ }
+
+ return widthratio_node, nil
+}
+
+func init() {
+ RegisterTag("widthratio", tagWidthratioParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_with.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_with.go
new file mode 100644
index 0000000..4d24e23
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_with.go
@@ -0,0 +1,88 @@
+package pongo2
+
+type tagWithNode struct {
+ with_pairs map[string]IEvaluator
+ wrapper *NodeWrapper
+}
+
+// Execute evaluates every with-pair against the parent context, binds the
+// results in a fresh child context and renders the wrapped block within it.
+func (node *tagWithNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	// The block gets its own scope so the bindings don't leak outwards.
+	blockCtx := NewChildExecutionContext(ctx)
+
+	for name, expr := range node.with_pairs {
+		v, evalErr := expr.Evaluate(ctx)
+		if evalErr != nil {
+			return evalErr
+		}
+		blockCtx.Private[name] = v
+	}
+
+	return node.wrapper.Execute(blockCtx, writer)
+}
+
+func tagWithParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ with_node := &tagWithNode{
+ with_pairs: make(map[string]IEvaluator),
+ }
+
+ if arguments.Count() == 0 {
+ return nil, arguments.Error("Tag 'with' requires at least one argument.", nil)
+ }
+
+ wrapper, endargs, err := doc.WrapUntilTag("endwith")
+ if err != nil {
+ return nil, err
+ }
+ with_node.wrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ // Scan through all arguments to see which style the user uses (old or new style).
+ // If we find any "as" keyword we will enforce old style; otherwise we will use new style.
+ old_style := false // by default we're using the new_style
+ for i := 0; i < arguments.Count(); i++ {
+ if arguments.PeekN(i, TokenKeyword, "as") != nil {
+ old_style = true
+ break
+ }
+ }
+
+ for arguments.Remaining() > 0 {
+ if old_style {
+ value_expr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ if arguments.Match(TokenKeyword, "as") == nil {
+ return nil, arguments.Error("Expected 'as' keyword.", nil)
+ }
+ key_token := arguments.MatchType(TokenIdentifier)
+ if key_token == nil {
+ return nil, arguments.Error("Expected an identifier", nil)
+ }
+ with_node.with_pairs[key_token.Val] = value_expr
+ } else {
+ key_token := arguments.MatchType(TokenIdentifier)
+ if key_token == nil {
+ return nil, arguments.Error("Expected an identifier", nil)
+ }
+ if arguments.Match(TokenSymbol, "=") == nil {
+ return nil, arguments.Error("Expected '='.", nil)
+ }
+ value_expr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ with_node.with_pairs[key_token.Val] = value_expr
+ }
+ }
+
+ return with_node, nil
+}
+
+func init() {
+ RegisterTag("with", tagWithParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template.go b/Godeps/_workspace/src/github.com/flosch/pongo2/template.go
new file mode 100644
index 0000000..cb67b22
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template.go
@@ -0,0 +1,191 @@
+package pongo2
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+type TemplateWriter interface {
+ io.Writer
+ WriteString(string) (int, error)
+}
+
+type templateWriter struct {
+ w io.Writer
+}
+
+func (tw *templateWriter) WriteString(s string) (int, error) {
+ return tw.w.Write([]byte(s))
+}
+
+func (tw *templateWriter) Write(b []byte) (int, error) {
+ return tw.w.Write(b)
+}
+
+type Template struct {
+ set *TemplateSet
+
+ // Input
+ is_tpl_string bool
+ name string
+ tpl string
+ size int
+
+ // Calculation
+ tokens []*Token
+ parser *Parser
+
+ // first come, first serve (it's important to not override existing entries in here)
+ level int
+ parent *Template
+ child *Template
+ blocks map[string]*NodeWrapper
+ exported_macros map[string]*tagMacroNode
+
+ // Output
+ root *nodeDocument
+}
+
+func newTemplateString(set *TemplateSet, tpl string) (*Template, error) {
+ return newTemplate(set, "", true, tpl)
+}
+
+func newTemplate(set *TemplateSet, name string, is_tpl_string bool, tpl string) (*Template, error) {
+ // Create the template
+ t := &Template{
+ set: set,
+ is_tpl_string: is_tpl_string,
+ name: name,
+ tpl: tpl,
+ size: len(tpl),
+ blocks: make(map[string]*NodeWrapper),
+ exported_macros: make(map[string]*tagMacroNode),
+ }
+
+ // Tokenize it
+ tokens, err := lex(name, tpl)
+ if err != nil {
+ return nil, err
+ }
+ t.tokens = tokens
+
+ // For debugging purposes, show all tokens:
+ /*for i, t := range tokens {
+ fmt.Printf("%3d. %s\n", i, t)
+ }*/
+
+ // Parse it
+ err = t.parse()
+ if err != nil {
+ return nil, err
+ }
+
+ return t, nil
+}
+
+// execute renders the template (or, with template inheritance, its topmost
+// parent) into writer using the given context merged over the set's globals.
+// The context may be nil; identifiers are validated and checked against
+// exported macro names before execution.
+func (tpl *Template) execute(context Context, writer TemplateWriter) error {
+	// Determine the parent to be executed (for template inheritance)
+	parent := tpl
+	for parent.parent != nil {
+		parent = parent.parent
+	}
+
+	// Create context if none is given
+	newContext := make(Context)
+	newContext.Update(tpl.set.Globals)
+
+	if context != nil {
+		newContext.Update(context)
+
+		if len(newContext) > 0 {
+			// Check for context name syntax
+			err := newContext.checkForValidIdentifiers()
+			if err != nil {
+				return err
+			}
+
+			// Check for clashes with macro names
+			// (idiom fix: "for k := range" instead of "for k, _ := range")
+			for k := range newContext {
+				_, has := tpl.exported_macros[k]
+				if has {
+					return &Error{
+						Filename: tpl.name,
+						Sender:   "execution",
+						ErrorMsg: fmt.Sprintf("Context key name '%s' clashes with macro '%s'.", k, k),
+					}
+				}
+			}
+		}
+	}
+
+	// Create operational context
+	ctx := newExecutionContext(parent, newContext)
+
+	// Run the selected document
+	if err := parent.root.Execute(ctx, writer); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (tpl *Template) newTemplateWriterAndExecute(context Context, writer io.Writer) error {
+ return tpl.execute(context, &templateWriter{w: writer})
+}
+
+func (tpl *Template) newBufferAndExecute(context Context) (*bytes.Buffer, error) {
+ // Create output buffer
+ // We assume that the rendered template will be 30% larger
+ buffer := bytes.NewBuffer(make([]byte, 0, int(float64(tpl.size)*1.3)))
+ if err := tpl.execute(context, buffer); err != nil {
+ return nil, err
+ }
+ return buffer, nil
+}
+
+// Executes the template with the given context and writes to writer (io.Writer)
+// on success. Context can be nil. Nothing is written on error; instead the error
+// is being returned.
+func (tpl *Template) ExecuteWriter(context Context, writer io.Writer) error {
+ buf, err := tpl.newBufferAndExecute(context)
+ if err != nil {
+ return err
+ }
+ _, err = buf.WriteTo(writer)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Same as ExecuteWriter. The only difference between both functions is that
+// this function might already have written parts of the generated template in the
+// case of an execution error because there's no intermediate buffer involved for
+// performance reasons. This is handy if you need high performance template
+// generation or if you want to manage your own pool of buffers.
+func (tpl *Template) ExecuteWriterUnbuffered(context Context, writer io.Writer) error {
+ return tpl.newTemplateWriterAndExecute(context, writer)
+}
+
+// Executes the template and returns the rendered template as a []byte
+func (tpl *Template) ExecuteBytes(context Context) ([]byte, error) {
+ // Execute template
+ buffer, err := tpl.newBufferAndExecute(context)
+ if err != nil {
+ return nil, err
+ }
+ return buffer.Bytes(), nil
+}
+
+// Executes the template and returns the rendered template as a string
+func (tpl *Template) Execute(context Context) (string, error) {
+ // Execute template
+ buffer, err := tpl.newBufferAndExecute(context)
+ if err != nil {
+ return "", err
+ }
+
+ return buffer.String(), nil
+
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_sets.go b/Godeps/_workspace/src/github.com/flosch/pongo2/template_sets.go
new file mode 100644
index 0000000..c582c5d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_sets.go
@@ -0,0 +1,296 @@
+package pongo2
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "sync"
+)
+
+// A template set allows you to create your own group of templates with their own global context (which is shared
+// among all members of the set), their own configuration (like a specific base directory) and their own sandbox.
+// It's useful for a separation of different kind of templates (e. g. web templates vs. mail templates).
+type TemplateSet struct {
+	// Human-readable set name; used only as a prefix in log output.
+	name string
+
+	// Globals will be provided to all templates created within this template set
+	Globals Context
+
+	// If debug is true (default false), ExecutionContext.Logf() will work and output to STDOUT. Furthermore,
+	// FromCache() won't cache the templates. Make sure to synchronize the access to it in case you're changing this
+	// variable during program execution (and template compilation/execution).
+	Debug bool
+
+	// Base directory: If you set the base directory (string is non-empty), all filename lookups in tags/filters are
+	// relative to this directory. If it's empty, all lookups are relative to the current filename which is importing.
+	baseDirectory string
+
+	// Sandbox features
+	// - Limit access to directories (using SandboxDirectories)
+	// - Disallow access to specific tags and/or filters (using BanTag() and BanFilter())
+	//
+	// You can limit file accesses (for all tags/filters which are using pongo2's file resolver technique)
+	// to these sandbox directories. All default pongo2 filters/tags are respecting these restrictions.
+	// For example, if you only have your base directory in the list, a {% ssi "/etc/passwd" %} will not work.
+	// No items in SandboxDirectories means no restrictions at all.
+	//
+	// For efficiency reasons you can ban tags/filters only *before* you have added your first
+	// template to the set (restrictions are statically checked). After you added one, it's not possible anymore
+	// (for your personal security).
+	//
+	// SandboxDirectories can be changed at runtime. Please synchronize the access to it if you need to change it
+	// after you've added your first template to the set. You *must* use this match pattern for your directories:
+	// http://golang.org/pkg/path/filepath/#Match
+	SandboxDirectories   []string
+	// Flipped to true on the first From*-call; freezes BanTag/BanFilter.
+	firstTemplateCreated bool
+	// Tags disallowed via BanTag() for templates of this set.
+	bannedTags map[string]bool
+	// Filters disallowed via BanFilter() for templates of this set.
+	bannedFilters map[string]bool
+
+	// Template cache (for FromCache())
+	templateCache map[string]*Template
+	// Guards templateCache against concurrent FromCache() calls.
+	templateCacheMutex sync.Mutex
+}
+
+// Create your own template sets to separate different kind of templates (e. g. web from mail templates) with
+// different globals or other configurations (like base directories).
+func NewSet(name string) *TemplateSet {
+	// All internal maps are initialized here so the set is immediately usable.
+	set := &TemplateSet{
+		name:          name,
+		Globals:       make(Context),
+		bannedTags:    make(map[string]bool),
+		bannedFilters: make(map[string]bool),
+		templateCache: make(map[string]*Template),
+	}
+	return set
+}
+
+// Use this function to set your template set's base directory. This directory will be used for any relative
+// path in filters, tags and From*-functions to determine your template.
+// Returns an error if the path cannot be made absolute, cannot be stat'ed or
+// is not a directory; the base directory is left unchanged in those cases.
+func (set *TemplateSet) SetBaseDirectory(name string) error {
+	// Make the path absolute
+	if !filepath.IsAbs(name) {
+		abs, err := filepath.Abs(name)
+		if err != nil {
+			return err
+		}
+		name = abs
+	}
+
+	// Check for existence
+	fi, err := os.Stat(name)
+	if err != nil {
+		return err
+	}
+	if !fi.IsDir() {
+		// BUGFIX: the '%s' verb previously had no corresponding argument,
+		// so the message rendered as "...'%!s(MISSING)'..."; pass name.
+		return fmt.Errorf("The given path '%s' is not a directory.", name)
+	}
+
+	set.baseDirectory = name
+	return nil
+}
+
+// BaseDirectory returns the set's current base directory
+// (empty string when none has been configured via SetBaseDirectory).
+func (set *TemplateSet) BaseDirectory() string {
+	return set.baseDirectory
+}
+
+// Ban a specific tag for this template set. See more in the documentation for TemplateSet.
+// Panics if the tag is unknown, already banned, or if any template
+// has already been added to the set.
+func (set *TemplateSet) BanTag(name string) {
+	if _, known := tags[name]; !known {
+		panic(fmt.Sprintf("Tag '%s' not found.", name))
+	}
+	if set.firstTemplateCreated {
+		panic("You cannot ban any tags after you've added your first template to your template set.")
+	}
+	if _, banned := set.bannedTags[name]; banned {
+		panic(fmt.Sprintf("Tag '%s' is already banned.", name))
+	}
+	set.bannedTags[name] = true
+}
+
+// Ban a specific filter for this template set. See more in the documentation for TemplateSet.
+// Panics if the filter is unknown, already banned, or if any template
+// has already been added to the set.
+func (set *TemplateSet) BanFilter(name string) {
+	if _, known := filters[name]; !known {
+		panic(fmt.Sprintf("Filter '%s' not found.", name))
+	}
+	if set.firstTemplateCreated {
+		panic("You cannot ban any filters after you've added your first template to your template set.")
+	}
+	if _, banned := set.bannedFilters[name]; banned {
+		panic(fmt.Sprintf("Filter '%s' is already banned.", name))
+	}
+	set.bannedFilters[name] = true
+}
+
+// FromCache() is a convenient method to cache templates. It is thread-safe
+// and will only compile the template associated with a filename once.
+// If TemplateSet.Debug is true (for example during development phase),
+// FromCache() will not cache the template and instead recompile it on any
+// call (to make changes to a template live instantaneously).
+// Like FromFile(), FromCache() takes a relative path to a set base directory.
+// Sandbox restrictions apply (if given).
+func (set *TemplateSet) FromCache(filename string) (*Template, error) {
+	if set.Debug {
+		// Recompile on any request
+		return set.FromFile(filename)
+	}
+
+	// Cache the template under its resolved name
+	cleanedName := set.resolveFilename(nil, filename)
+
+	set.templateCacheMutex.Lock()
+	defer set.templateCacheMutex.Unlock()
+
+	// Cache hit
+	if cached, ok := set.templateCache[cleanedName]; ok {
+		return cached, nil
+	}
+
+	// Cache miss: compile once and remember the result
+	tpl, err := set.FromFile(cleanedName)
+	if err != nil {
+		return nil, err
+	}
+	set.templateCache[cleanedName] = tpl
+	return tpl, nil
+}
+
+// Loads a template from string and returns a Template instance.
+func (set *TemplateSet) FromString(tpl string) (*Template, error) {
+	// Any template creation freezes BanTag/BanFilter for this set.
+	set.firstTemplateCreated = true
+
+	return newTemplateString(set, tpl)
+}
+
+// Loads a template from a filename and returns a Template instance.
+// If a base directory is set, the filename must be either relative to it
+// or be an absolute path. Sandbox restrictions (SandboxDirectories) apply
+// if given.
+func (set *TemplateSet) FromFile(filename string) (*Template, error) {
+	set.firstTemplateCreated = true
+
+	// Read errors are wrapped into a pongo2 *Error carrying the filename.
+	content, readErr := ioutil.ReadFile(set.resolveFilename(nil, filename))
+	if readErr != nil {
+		return nil, &Error{
+			Filename: filename,
+			Sender:   "fromfile",
+			ErrorMsg: readErr.Error(),
+		}
+	}
+	return newTemplate(set, filename, false, string(content))
+}
+
+// Shortcut; renders a template string directly. Panics when providing a
+// malformed template or an error occurs during execution.
+func (set *TemplateSet) RenderTemplateString(s string, ctx Context) string {
+	set.firstTemplateCreated = true
+
+	// Must() panics on compilation errors; execution errors panic below.
+	rendered, execErr := Must(set.FromString(s)).Execute(ctx)
+	if execErr != nil {
+		panic(execErr)
+	}
+	return rendered
+}
+
+// Shortcut; renders a template file directly. Panics when providing a
+// malformed template or an error occurs during execution.
+func (set *TemplateSet) RenderTemplateFile(fn string, ctx Context) string {
+	set.firstTemplateCreated = true
+
+	// Must() panics on compilation errors; execution errors panic below.
+	rendered, execErr := Must(set.FromFile(fn)).Execute(ctx)
+	if execErr != nil {
+		panic(execErr)
+	}
+	return rendered
+}
+
+// logf writes a debug message prefixed with the set's name.
+// It is a no-op unless set.Debug is true.
+func (set *TemplateSet) logf(format string, args ...interface{}) {
+	if set.Debug {
+		logger.Printf(fmt.Sprintf("[template set: %s] %s", set.name, format), args...)
+	}
+}
+
+// Resolves a filename relative to the base directory. Absolute paths are allowed.
+// If sandbox restrictions are given (SandboxDirectories), they will be respected and checked.
+// On sandbox restriction violation, resolveFilename() panics.
+// tpl may be nil; when no base directory is set and tpl is given, the lookup
+// is relative to the directory of the requesting template.
+func (set *TemplateSet) resolveFilename(tpl *Template, filename string) (resolved_path string) {
+	if len(set.SandboxDirectories) > 0 {
+		// This deferred closure post-processes the named return value
+		// resolved_path produced by whichever return below fires: it
+		// normalizes the path and blanks it if no sandbox pattern matches.
+		defer func() {
+			// Remove any ".." or other crap
+			resolved_path = filepath.Clean(resolved_path)
+
+			// Make the path absolute
+			abs_path, err := filepath.Abs(resolved_path)
+			if err != nil {
+				panic(err)
+			}
+			resolved_path = abs_path
+
+			// Check against the sandbox directories (once one pattern matches, we're done and can allow it)
+			for _, pattern := range set.SandboxDirectories {
+				matched, err := filepath.Match(pattern, resolved_path)
+				if err != nil {
+					panic("Wrong sandbox directory match pattern (see http://golang.org/pkg/path/filepath/#Match).")
+				}
+				if matched {
+					// OK!
+					return
+				}
+			}
+
+			// No pattern matched, we have to log+deny the request
+			set.logf("Access attempt outside of the sandbox directories (blocked): '%s'", resolved_path)
+			resolved_path = ""
+		}()
+	}
+
+	if filepath.IsAbs(filename) {
+		return filename
+	}
+
+	if set.baseDirectory == "" {
+		if tpl != nil {
+			if tpl.is_tpl_string {
+				// String templates have no on-disk location to resolve against.
+				return filename
+			}
+			// Resolve relative to the directory of the requesting template.
+			base := filepath.Dir(tpl.name)
+			return filepath.Join(base, filename)
+		}
+		return filename
+	} else {
+		return filepath.Join(set.baseDirectory, filename)
+	}
+}
+
+// Logging function (internally used)
+// Writes to the package logger only when the internal debug flag is set.
+func logf(format string, items ...interface{}) {
+	if debug {
+		logger.Printf(format, items...)
+	}
+}
+
+var (
+	debug  bool // internal debugging
+	logger = log.New(os.Stdout, "[pongo2] ", log.LstdFlags)
+
+	// Creating a default set
+	DefaultSet = NewSet("default")
+
+	// Methods on the default set (package-level convenience aliases)
+	FromString           = DefaultSet.FromString
+	FromFile             = DefaultSet.FromFile
+	FromCache            = DefaultSet.FromCache
+	RenderTemplateString = DefaultSet.RenderTemplateString
+	RenderTemplateFile   = DefaultSet.RenderTemplateFile
+
+	// Globals for the default set
+	Globals = DefaultSet.Globals
+)
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/autoescape.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/autoescape.tpl
new file mode 100644
index 0000000..f02fe9d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/autoescape.tpl
@@ -0,0 +1,10 @@
+{{ "" }}
+{% autoescape off %}
+{{ "" }}
+{% endautoescape %}
+{% autoescape on %}
+{{ "" }}
+{% endautoescape %}
+{% autoescape off %}
+{{ ""|escape }}
+{% endautoescape %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/autoescape.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/autoescape.tpl.out
new file mode 100644
index 0000000..5955cc6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/autoescape.tpl.out
@@ -0,0 +1,9 @@
+<script>alert('xss');</script>
+
+
+
+
+<script>alert('xss');</script>
+
+
+<script>alert('xss');</script>
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/base_dir_test/base.html b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/base_dir_test/base.html
new file mode 100644
index 0000000..17044cf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/base_dir_test/base.html
@@ -0,0 +1 @@
+Hello from {{ base_directory }}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/base_dir_test/subdir/include.html b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/base_dir_test/subdir/include.html
new file mode 100644
index 0000000..ad17c3b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/base_dir_test/subdir/include.html
@@ -0,0 +1 @@
+{% include "base.html" %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/base_dir_test/subdir/index.html b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/base_dir_test/subdir/index.html
new file mode 100644
index 0000000..94d9808
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/base_dir_test/subdir/index.html
@@ -0,0 +1 @@
+{% extends "base.html" %}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/base_dir_test/subdir/ssi.html b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/base_dir_test/subdir/ssi.html
new file mode 100644
index 0000000..584026f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/base_dir_test/subdir/ssi.html
@@ -0,0 +1 @@
+{% ssi "base.html" parsed %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/complex.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/complex.tpl
new file mode 100644
index 0000000..c707fb1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/complex.tpl
@@ -0,0 +1,32 @@
+{# A more complex template using pongo2 (fully django-compatible template) #}
+
+
+
+
+ My blog page
+
+
+
+ Blogpost
+
+ {{ complex.post.Text|safe }}
+
+
+ Comments
+
+ {% for comment in complex.comments %}
+ {{ forloop.Counter }}. Comment ({{ forloop.Revcounter}} comment{{ forloop.Revcounter|pluralize:"s" }} left)
+ From: {{ comment.Author.Name }} ({{ comment.Author.Validated|yesno:"validated,not validated,unknown validation status" }})
+
+ {% if complex.is_admin(comment.Author) %}
+ This user is an admin (verify: {{ comment.Author.Is_admin }})!
+ {% else %}
+ This user is not admin!
+ {% endif %}
+
+ Written {{ comment.Date }}
+ {{ comment.Text|striptags }}
+ {% endfor %}
+
+
+
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/complex.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/complex.tpl.out
new file mode 100644
index 0000000..7fa3e1d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/complex.tpl.out
@@ -0,0 +1,50 @@
+
+
+
+
+
+ My blog page
+
+
+
+ Blogpost
+
+
Hello!
Welcome to my new blog page. I'm using pongo2 which supports {{ variables }} and {% tags %}.
+
+
+ Comments
+
+
+ 1. Comment (3 comments left)
+ From: user1 (validated)
+
+
+ This user is not admin!
+
+
+ Written 2014-06-10 15:30:15 +0000 UTC
+ "pongo2 is nice!"
+
+ 2. Comment (2 comments left)
+ From: user2 (validated)
+
+
+ This user is an admin (verify: True)!
+
+
+ Written 2011-03-21 08:37:56.000000012 +0000 UTC
+ comment2 with unsafe tags in it
+
+ 3. Comment (1 comment left)
+ From: user3 (not validated)
+
+
+ This user is not admin!
+
+
+ Written 2014-06-10 15:30:15 +0000 UTC
+ hello! there
+
+
+
+
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/cycle.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/cycle.tpl
new file mode 100644
index 0000000..967d82f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/cycle.tpl
@@ -0,0 +1,22 @@
+{% for item in simple.multiple_item_list %}
+ '{% cycle "item1" simple.name simple.number %}'
+{% endfor %}
+{% for item in simple.multiple_item_list %}
+ '{% cycle "item1" simple.name simple.number as cycleitem %}'
+ May I present the cycle item again: '{{ cycleitem }}'
+{% endfor %}
+{% for item in simple.multiple_item_list %}
+ '{% cycle "item1" simple.name simple.number as cycleitem silent %}'
+ May I present the cycle item: '{{ cycleitem }}'
+{% endfor %}
+{% for item in simple.multiple_item_list %}
+ '{% cycle "item1" simple.name simple.number as cycleitem silent %}'
+ May I present the cycle item: '{{ cycleitem }}'
+ {% include "inheritance/cycle_include.tpl" %}
+{% endfor %}
+'{% cycle "item1" simple.name simple.number as cycleitem %}'
+'{% cycle cycleitem %}'
+'{% cycle "item1" simple.name simple.number as cycleitem silent %}'
+'{{ cycleitem }}'
+'{% cycle cycleitem %}'
+'{{ cycleitem }}'
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/cycle.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/cycle.tpl.out
new file mode 100644
index 0000000..b966fb3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/cycle.tpl.out
@@ -0,0 +1,130 @@
+
+ 'item1'
+
+ 'john doe'
+
+ '42'
+
+ 'item1'
+
+ 'john doe'
+
+ '42'
+
+ 'item1'
+
+ 'john doe'
+
+ '42'
+
+ 'item1'
+
+
+ 'item1'
+ May I present the cycle item again: 'item1'
+
+ 'john doe'
+ May I present the cycle item again: 'john doe'
+
+ '42'
+ May I present the cycle item again: '42'
+
+ 'item1'
+ May I present the cycle item again: 'item1'
+
+ 'john doe'
+ May I present the cycle item again: 'john doe'
+
+ '42'
+ May I present the cycle item again: '42'
+
+ 'item1'
+ May I present the cycle item again: 'item1'
+
+ 'john doe'
+ May I present the cycle item again: 'john doe'
+
+ '42'
+ May I present the cycle item again: '42'
+
+ 'item1'
+ May I present the cycle item again: 'item1'
+
+
+ ''
+ May I present the cycle item: 'item1'
+
+ ''
+ May I present the cycle item: 'john doe'
+
+ ''
+ May I present the cycle item: '42'
+
+ ''
+ May I present the cycle item: 'item1'
+
+ ''
+ May I present the cycle item: 'john doe'
+
+ ''
+ May I present the cycle item: '42'
+
+ ''
+ May I present the cycle item: 'item1'
+
+ ''
+ May I present the cycle item: 'john doe'
+
+ ''
+ May I present the cycle item: '42'
+
+ ''
+ May I present the cycle item: 'item1'
+
+
+ ''
+ May I present the cycle item: 'item1'
+ Included 'item1'.
+
+ ''
+ May I present the cycle item: 'john doe'
+ Included 'john doe'.
+
+ ''
+ May I present the cycle item: '42'
+ Included '42'.
+
+ ''
+ May I present the cycle item: 'item1'
+ Included 'item1'.
+
+ ''
+ May I present the cycle item: 'john doe'
+ Included 'john doe'.
+
+ ''
+ May I present the cycle item: '42'
+ Included '42'.
+
+ ''
+ May I present the cycle item: 'item1'
+ Included 'item1'.
+
+ ''
+ May I present the cycle item: 'john doe'
+ Included 'john doe'.
+
+ ''
+ May I present the cycle item: '42'
+ Included '42'.
+
+ ''
+ May I present the cycle item: 'item1'
+ Included 'item1'.
+
+'item1'
+'john doe'
+''
+'item1'
+''
+'john doe'
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/empty.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/empty.tpl
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/empty.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/empty.tpl.out
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/expressions.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/expressions.tpl
new file mode 100644
index 0000000..caada14
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/expressions.tpl
@@ -0,0 +1,69 @@
+integers and complex expressions
+{{ 10-100 }}
+{{ -(10-100) }}
+{{ -(-(10-100)) }}
+{{ -1 * (-(-(10-100))) }}
+{{ -1 * (-(-(10-100)) ^ 2) ^ 3 + 3 * (5 - 17) + 1 + 2 }}
+
+floats
+{{ 5.5 }}
+{{ 5.172841 }}
+{{ 5.5 - 1.5 == 4 }}
+{{ 5.5 - 1.5 == 4.0 }}
+
+mul/div
+{{ 2 * 5 }}
+{{ 2 * 5.0 }}
+{{ 2 * 0 }}
+{{ 2.5 * 5.3 }}
+{{ 1/2 }}
+{{ 1/2.0 }}
+{{ 1/0.000001 }}
+
+logic expressions
+{{ !true }}
+{{ !(true || false) }}
+{{ true || false }}
+{{ true or false }}
+{{ false or false }}
+{{ false || false }}
+{{ true && (true && (true && (true && (1 == 1 || false)))) }}
+
+float comparison
+{{ 5.5 <= 5.5 }}
+{{ 5.5 < 5.5 }}
+{{ 5.5 > 5.5 }}
+{{ 5.5 >= 5.5 }}
+
+remainders
+{{ (simple.number+7)%7 }}
+{{ (simple.number+7)%7 == 0 }}
+{{ (simple.number+7)%6 }}
+
+in/not in
+{{ 5 in simple.intmap }}
+{{ 2 in simple.intmap }}
+{{ 7 in simple.intmap }}
+{{ !(5 in simple.intmap) }}
+{{ not(7 in simple.intmap) }}
+{{ 1 in simple.multiple_item_list }}
+{{ 4 in simple.multiple_item_list }}
+{{ !(4 in simple.multiple_item_list) }}
+{{ "Hello" in simple.misc_list }}
+{{ "Hello2" in simple.misc_list }}
+{{ 99 in simple.misc_list }}
+{{ False in simple.misc_list }}
+
+issue #48 (associativity for infix operators)
+{{ 34/3*3 }}
+{{ 10 + 24 / 6 / 2 }}
+{{ 6 - 4 - 2 }}
+
+issue #64 (uint comparison with int const)
+{{ simple.uint }}
+{{ simple.uint == 8 }}
+{{ simple.uint == 9 }}
+{{ simple.uint >= 8 }}
+{{ simple.uint <= 8 }}
+{{ simple.uint < 8 }}
+{{ simple.uint > 8 }}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/expressions.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/expressions.tpl.out
new file mode 100644
index 0000000..d710fc8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/expressions.tpl.out
@@ -0,0 +1,69 @@
+integers and complex expressions
+-90
+90
+-90
+90
+531440999967.000000
+
+floats
+5.500000
+5.172841
+False
+True
+
+mul/div
+10
+10.000000
+0
+13.250000
+0
+0.500000
+1000000.000000
+
+logic expressions
+False
+False
+True
+True
+False
+False
+True
+
+float comparison
+True
+False
+False
+True
+
+remainders
+0
+True
+1
+
+in/not in
+True
+True
+False
+False
+True
+True
+False
+True
+True
+False
+True
+False
+
+issue #48 (associativity for infix operators)
+33
+12
+0
+
+issue #64 (uint comparison with int const)
+8
+True
+False
+True
+True
+False
+False
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/extends.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/extends.tpl
new file mode 100644
index 0000000..7216d05
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/extends.tpl
@@ -0,0 +1,3 @@
+{% extends "inheritance/base.tpl" %}
+
+{% block content %}Extends' content{% endblock %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/extends.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/extends.tpl.out
new file mode 100644
index 0000000..4c535c7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/extends.tpl.out
@@ -0,0 +1 @@
+Start#This is base's bodyExtends' content#End
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters-compilation.err b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters-compilation.err
new file mode 100644
index 0000000..cc5c8cb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters-compilation.err
@@ -0,0 +1,5 @@
+{{ }}
+{{ (1 - 1 }}
+{{ 1|float: }}
+{{ "test"|non_existent_filter }}
+{{ "test"|"test" }}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters-compilation.err.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters-compilation.err.out
new file mode 100644
index 0000000..3562fae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters-compilation.err.out
@@ -0,0 +1,5 @@
+.*Expected either a number, string, keyword or identifier\.
+.*Closing bracket expected after expression
+.*Filter parameter required after ':'.*
+.*Filter 'non_existent_filter' does not exist\.
+.*Filter name must be an identifier\.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters-execution.err b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters-execution.err
new file mode 100644
index 0000000..da761b2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters-execution.err
@@ -0,0 +1,3 @@
+{{ -(true || false) }}
+{{ simple.func_add("test", 5) }}
+{{ simple.func_variadic_sum_int("foo") }}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters-execution.err.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters-execution.err.out
new file mode 100644
index 0000000..e83ae01
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters-execution.err.out
@@ -0,0 +1,3 @@
+.*where: execution.*Negative sign on a non\-number expression
+.*Function input argument 0 of 'simple.func_add' must be of type int or \*pongo2.Value \(not string\).
+.*Function variadic input argument of 'simple.func_variadic_sum_int' must be of type int or \*pongo2.Value \(not string\).
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters.tpl
new file mode 100644
index 0000000..c15468a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/filters.tpl
@@ -0,0 +1,304 @@
+add
+{{ 5|add:2 }}
+{{ 5|add:simple.number }}
+{{ 5|add:nothing }}
+{{ 5|add:"test" }}
+{{ "hello "|add:"john doe" }}
+{{ "hello "|add:simple.name }}
+
+addslashes
+{{ "plain text"|addslashes|safe }}
+{{ simple.escape_text|addslashes|safe }}
+
+capfirst
+{{ ""|capfirst }}
+{{ 5|capfirst }}
+{{ "h"|capfirst }}
+{{ "hello there!"|capfirst }}
+{{ simple.chinese_hello_world|capfirst }}
+
+cut
+{{ 15|cut:"5" }}
+{{ "Hello world"|cut: " " }}
+
+default
+{{ simple.nothing|default:"n/a" }}
+{{ nothing|default:simple.number }}
+{{ simple.number|default:"n/a" }}
+{{ 5|default:"n/a" }}
+
+default_if_none
+{{ simple.nothing|default_if_none:"n/a" }}
+{{ ""|default_if_none:"n/a" }}
+{{ nil|default_if_none:"n/a" }}
+
+get_digit
+{{ 1234567890|get_digit:0 }}
+{{ 1234567890|get_digit }}
+{{ 1234567890|get_digit:2 }}
+{{ 1234567890|get_digit:"4" }}
+{{ 1234567890|get_digit:10 }}
+{{ 1234567890|get_digit:15 }}
+
+safe
+{{ "" %}
+{% firstof doesnotexist ""|safe %}
+{% firstof doesnotexist simple.uint 42 %}
+{% firstof doesnotexist "test" simple.number 42 %}
+{% firstof %}
+{% firstof "test" "test2" %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/firstof.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/firstof.tpl.out
new file mode 100644
index 0000000..5ae55ad
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/firstof.tpl.out
@@ -0,0 +1,7 @@
+42
+<script>alert('xss');</script>
+
+8
+test
+
+test
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/for.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/for.tpl
new file mode 100644
index 0000000..51e144c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/for.tpl
@@ -0,0 +1,9 @@
+{% for comment in complex.comments %}[{{ forloop.Counter }} {{ forloop.Counter0 }} {{ forloop.First }} {{ forloop.Last }} {{ forloop.Revcounter }} {{ forloop.Revcounter0 }}] {{ comment.Author.Name }}
+
+{# nested loop #}
+{% for char in comment.Text %}{{forloop.Parentloop.Counter0}}.{{forloop.Counter0}}:{{ char|safe }} {% endfor %}
+
+{% endfor %}
+
+reversed
+'{% for item in simple.multiple_item_list reversed %}{{ item }} {% endfor %}'
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/for.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/for.tpl.out
new file mode 100644
index 0000000..18258a2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/for.tpl.out
@@ -0,0 +1,19 @@
+[1 0 True False 3 2] user1
+
+
+0.0:" 0.1:p 0.2:o 0.3:n 0.4:g 0.5:o 0.6:2 0.7: 0.8:i 0.9:s 0.10: 0.11:n 0.12:i 0.13:c 0.14:e 0.15:! 0.16:"
+
+[2 1 False False 2 1] user2
+
+
+1.0:c 1.1:o 1.2:m 1.3:m 1.4:e 1.5:n 1.6:t 1.7:2 1.8: 1.9:w 1.10:i 1.11:t 1.12:h 1.13: 1.14:< 1.15:s 1.16:c 1.17:r 1.18:i 1.19:p 1.20:t 1.21:> 1.22:u 1.23:n 1.24:s 1.25:a 1.26:f 1.27:e 1.28:< 1.29:/ 1.30:s 1.31:c 1.32:r 1.33:i 1.34:p 1.35:t 1.36:> 1.37: 1.38:t 1.39:a 1.40:g 1.41:s 1.42: 1.43:i 1.44:n 1.45: 1.46:i 1.47:t
+
+[3 2 False True 1 0] user3
+
+
+2.0:< 2.1:b 2.2:> 2.3:h 2.4:e 2.5:l 2.6:l 2.7:o 2.8:! 2.9:< 2.10:/ 2.11:b 2.12:> 2.13: 2.14:t 2.15:h 2.16:e 2.17:r 2.18:e
+
+
+
+reversed
+'55 34 21 13 8 5 3 2 1 1 '
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/function_calls_wrapper.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/function_calls_wrapper.tpl
new file mode 100644
index 0000000..85b870a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/function_calls_wrapper.tpl
@@ -0,0 +1,11 @@
+{{ simple.func_add(simple.func_add(5, 15), simple.number) + 17 }}
+{{ simple.func_add_iface(simple.func_add_iface(5, 15), simple.number) + 17 }}
+{{ simple.func_variadic("hello") }}
+{{ simple.func_variadic("hello, %s", simple.name) }}
+{{ simple.func_variadic("%d + %d %s %d", 5, simple.number, "is", 49) }}
+{{ simple.func_variadic_sum_int() }}
+{{ simple.func_variadic_sum_int(1) }}
+{{ simple.func_variadic_sum_int(1, 19, 185) }}
+{{ simple.func_variadic_sum_int2() }}
+{{ simple.func_variadic_sum_int2(2) }}
+{{ simple.func_variadic_sum_int2(1, 7, 100) }}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/function_calls_wrapper.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/function_calls_wrapper.tpl.out
new file mode 100644
index 0000000..924e466
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/function_calls_wrapper.tpl.out
@@ -0,0 +1,11 @@
+79
+79
+hello
+hello, john doe
+5 + 42 is 49
+0
+1
+205
+0
+2
+108
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/if.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/if.tpl
new file mode 100644
index 0000000..c434c3f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/if.tpl
@@ -0,0 +1,17 @@
+{% if nothing %}false{% else %}true{% endif %}
+{% if simple %}simple != nil{% endif %}
+{% if simple.uint %}uint != 0{% endif %}
+{% if simple.float %}float != 0.0{% endif %}
+{% if !simple %}false{% else %}!simple{% endif %}
+{% if !simple.uint %}false{% else %}!simple.uint{% endif %}
+{% if !simple.float %}false{% else %}!simple.float{% endif %}
+{% if "Text" in complex.post %}text field in complex.post{% endif %}
+{% if 5 in simple.intmap %}5 in simple.intmap{% endif %}
+{% if !0.0 %}!0.0{% endif %}
+{% if !0 %}!0{% endif %}
+{% if simple.number == 43 %}no{% else %}42{% endif %}
+{% if simple.number < 42 %}false{% elif simple.number > 42 %}no{% elif simple.number >= 42 %}yes{% else %}no{% endif %}
+{% if simple.number < 42 %}false{% elif simple.number > 42 %}no{% elif simple.number != 42 %}no{% else %}yes{% endif %}
+{% if 0 %}!0{% elif nothing %}nothing{% else %}true{% endif %}
+{% if 0 %}!0{% elif simple.float %}simple.float{% else %}false{% endif %}
+{% if 0 %}!0{% elif !simple.float %}false{% elif "Text" in complex.post%}Elseif with no else{% endif %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/if.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/if.tpl.out
new file mode 100644
index 0000000..bf931be
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/if.tpl.out
@@ -0,0 +1,17 @@
+true
+simple != nil
+uint != 0
+float != 0.0
+!simple
+!simple.uint
+!simple.float
+text field in complex.post
+5 in simple.intmap
+!0.0
+!0
+42
+yes
+yes
+true
+simple.float
+Elseif with no else
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ifchanged.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ifchanged.tpl
new file mode 100644
index 0000000..0282925
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ifchanged.tpl
@@ -0,0 +1,9 @@
+{% for comment in complex.comments2 %}
+ {% ifchanged %}New comment from another user {{ comment.Author.Name }}{% endifchanged %}
+ {% ifchanged comment.Author.Validated %}
+ Validated changed to {{ comment.Author.Validated }}
+ {% else %}
+ Validated value not changed
+ {% endifchanged %}
+ {% ifchanged comment.Author.Name comment.Date %}Comment's author name or date changed{% endifchanged %}
+{% endfor %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ifchanged.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ifchanged.tpl.out
new file mode 100644
index 0000000..3f186cb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ifchanged.tpl.out
@@ -0,0 +1,18 @@
+
+ New comment from another user user1
+
+ Validated changed to True
+
+ Comment's author name or date changed
+
+
+
+ Validated value not changed
+
+ Comment's author name or date changed
+
+ New comment from another user user3
+
+ Validated changed to False
+
+ Comment's author name or date changed
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/includes.helper b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/includes.helper
new file mode 100644
index 0000000..b66db23
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/includes.helper
@@ -0,0 +1 @@
+I'm {{ what_am_i }}{{ number }}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/includes.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/includes.tpl
new file mode 100644
index 0000000..2394ee9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/includes.tpl
@@ -0,0 +1,7 @@
+Start '{% include "includes.helper" %}' End
+Start '{% include "includes.helper" if_exists %}' End
+Start '{% include "includes.helper" with what_am_i=simple.name only %}' End
+Start '{% include "includes.helper" with what_am_i=simple.name %}' End
+Start '{% include simple.included_file|lower with number=7 what_am_i="guest" %}' End
+Start '{% include "includes.helper.not_exists" if_exists %}' End
+Start '{% include simple.included_file_not_exists if_exists with number=7 what_am_i="guest" %}' End
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/includes.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/includes.tpl.out
new file mode 100644
index 0000000..61d9318
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/includes.tpl.out
@@ -0,0 +1,7 @@
+Start 'I'm 11' End
+Start 'I'm 11' End
+Start 'I'm john doe' End
+Start 'I'm john doe11' End
+Start 'I'm guest7' End
+Start '' End
+Start '' End
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/inheritance/base.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/inheritance/base.tpl
new file mode 100644
index 0000000..2b06d32
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/inheritance/base.tpl
@@ -0,0 +1,3 @@
+{% extends "inheritance2/skeleton.tpl" %}
+
+{% block body %}This is base's body{% block content %}Default content{% endblock %}{% endblock %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/inheritance/base2.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/inheritance/base2.tpl
new file mode 100644
index 0000000..5ebad5f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/inheritance/base2.tpl
@@ -0,0 +1 @@
+{% include "doesnotexist.tpl" %}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/inheritance/cycle_include.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/inheritance/cycle_include.tpl
new file mode 100644
index 0000000..4b5d7b9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/inheritance/cycle_include.tpl
@@ -0,0 +1 @@
+Included '{{ cycleitem }}'.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/inheritance/inheritance2/skeleton.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/inheritance/inheritance2/skeleton.tpl
new file mode 100644
index 0000000..c07cde6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/inheritance/inheritance2/skeleton.tpl
@@ -0,0 +1 @@
+Start#{% block body %}Default body{% endblock %}#End
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/issues.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/issues.tpl
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/issues.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/issues.tpl.out
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/lorem.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/lorem.tpl
new file mode 100644
index 0000000..f6b52dd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/lorem.tpl
@@ -0,0 +1,9 @@
+-----
+{% lorem %}
+-----
+{% lorem 10 %}
+-----
+{% lorem 3 p %}
+-----
+{% lorem 100 w %}
+-----
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/lorem.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/lorem.tpl.out
new file mode 100644
index 0000000..286a148
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/lorem.tpl.out
@@ -0,0 +1,20 @@
+-----
+Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+-----
+Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
+Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
+Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
+Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.
+At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, At accusam aliquyam diam diam dolore dolores duo eirmod eos erat, et nonumy sed tempor et et invidunt justo labore Stet clita ea et gubergren, kasd magna no rebum. sanctus sea sed takimata ut vero voluptua. est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat.
+Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.
+Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
+Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
+-----
+Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
+Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
+-----
+Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum
+-----
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro-compilation.err b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro-compilation.err
new file mode 100644
index 0000000..baf3e6e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro-compilation.err
@@ -0,0 +1 @@
+{% macro test_override() export %}{% endmacro %}{% macro test_override() export %}{% endmacro %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro-compilation.err.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro-compilation.err.out
new file mode 100644
index 0000000..a9ad8f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro-compilation.err.out
@@ -0,0 +1 @@
+.*Another macro with name 'test_override' already exported.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro-execution.err b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro-execution.err
new file mode 100644
index 0000000..ef7872c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro-execution.err
@@ -0,0 +1 @@
+{% macro number() export %}No number here.{% endmacro %}{{ number() }}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro-execution.err.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro-execution.err.out
new file mode 100644
index 0000000..4d4067b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro-execution.err.out
@@ -0,0 +1 @@
+.*Context key name 'number' clashes with macro 'number'.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro.helper b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro.helper
new file mode 100644
index 0000000..d9809fd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro.helper
@@ -0,0 +1,2 @@
+{% macro imported_macro(foo) export %}Hey {{ foo }}!
{% endmacro %}
+{% macro imported_macro_void() export %}Hello mate!
{% endmacro %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro.tpl
new file mode 100644
index 0000000..3c4d931
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro.tpl
@@ -0,0 +1,30 @@
+Begin
+{% macro greetings(to, from=simple.name, name2="guest") %}
+Greetings to {{ to }} from {{ from }}. Howdy, {% if name2 == "guest" %}anonymous guest{% else %}{{ name2 }}{% endif %}!
+{% endmacro %}
+{{ greetings() }}
+{{ greetings(10) }}
+{{ greetings("john") }}
+{{ greetings("john", "michelle") }}
+{{ greetings("john", "michelle", "johann") }}
+{{ greetings("john", "michelle", "johann", "foobar") }}
+
+{% macro test2(loop, value) %}map[{{ loop.Counter0 }}] = {{ value }}{% endmacro %}
+{% for item in simple.misc_list %}
+{{ test2(forloop, item) }}{% endfor %}
+
+issue #39 (deactivate auto-escape of macros)
+{% macro html_test(name) %}
+Hello {{ name }}.
+{% endmacro %}
+{{ html_test("Max") }}
+
+Importing macros
+{% import "macro.helper" imported_macro, imported_macro as renamed_macro, imported_macro as html_test %}
+{{ imported_macro("User1") }}
+{{ renamed_macro("User2") }}
+{{ html_test("Max") }}
+
+Chaining macros{% import "macro2.helper" greeter_macro %}
+{{ greeter_macro() }}
+End
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro.tpl.out
new file mode 100644
index 0000000..1bb9274
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro.tpl.out
@@ -0,0 +1,44 @@
+Begin
+
+
+Greetings to from john doe. Howdy, anonymous guest!
+
+
+Greetings to 10 from john doe. Howdy, anonymous guest!
+
+
+Greetings to john from john doe. Howdy, anonymous guest!
+
+
+Greetings to john from michelle. Howdy, anonymous guest!
+
+
+Greetings to john from michelle. Howdy, johann!
+
+[Error (where: execution) in template_tests/macro.tpl | Line 2 Col 4 near 'macro'] Macro 'greetings' called with too many arguments (4 instead of 3).
+
+
+
+map[0] = Hello
+map[1] = 99
+map[2] = 3.140000
+map[3] = good
+
+issue #39 (deactivate auto-escape of macros)
+
+
+Hello Max.
+
+
+Importing macros
+
+Hey User1!
+Hey User2!
+Hey Max!
+
+Chaining macros
+
+
+One greeting: Hey Dirk!
- Hello mate!
+
+End
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro2.helper b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro2.helper
new file mode 100644
index 0000000..faa89c3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/macro2.helper
@@ -0,0 +1,4 @@
+{% macro greeter_macro() export %}
+{% import "macro.helper" imported_macro, imported_macro_void %}
+One greeting: {{ imported_macro("Dirk") }} - {{ imported_macro_void() }}
+{% endmacro %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/now.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/now.tpl
new file mode 100644
index 0000000..99ddae8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/now.tpl
@@ -0,0 +1,2 @@
+{# The 'fake' argument exists to have tests for the now-tag; it will set the time to a specific date instead of now #}
+{% now "Mon Jan 2 15:04:05 -0700 MST 2006" fake %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/now.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/now.tpl.out
new file mode 100644
index 0000000..4a8a624
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/now.tpl.out
@@ -0,0 +1,2 @@
+
+Wed Feb 5 18:31:45 +0000 UTC 2014
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/pongo2ctx.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/pongo2ctx.tpl
new file mode 100644
index 0000000..a3715f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/pongo2ctx.tpl
@@ -0,0 +1 @@
+{{ pongo2.version }}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/pongo2ctx.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/pongo2ctx.tpl.out
new file mode 100644
index 0000000..9001211
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/pongo2ctx.tpl.out
@@ -0,0 +1 @@
+dev
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/sandbox-compilation.err b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/sandbox-compilation.err
new file mode 100644
index 0000000..fd59c3b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/sandbox-compilation.err
@@ -0,0 +1,3 @@
+{{ "hello"|banned_filter }}
+{% banned_tag %}
+{% include "../../test_not_existent" %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/sandbox-compilation.err.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/sandbox-compilation.err.out
new file mode 100644
index 0000000..cbc56ef
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/sandbox-compilation.err.out
@@ -0,0 +1,3 @@
+.*Usage of filter 'banned_filter' is not allowed \(sandbox restriction active\).
+.*Usage of tag 'banned_tag' is not allowed \(sandbox restriction active\).
+\[Error \(where: fromfile\) | Line 1 Col 12 near '../../test_not_existent'\] open : no such file or directory
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/sandbox.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/sandbox.tpl
new file mode 100644
index 0000000..5a58c75
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/sandbox.tpl
@@ -0,0 +1,3 @@
+{{ "hello"|unbanned_filter }}
+{% unbanned_tag %}
+{% include temp_file %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/sandbox.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/sandbox.tpl.out
new file mode 100644
index 0000000..a60ed32
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/sandbox.tpl.out
@@ -0,0 +1,3 @@
+hello
+hello
+Hello from pongo2
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/set.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/set.tpl
new file mode 100644
index 0000000..09e7b2d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/set.tpl
@@ -0,0 +1,5 @@
+{% set new_var = "hello" %}{{ new_var }}
+{% block content %}{% set new_var = "world" %}{{ new_var }}{% endblock %}
+{{ new_var }}{% for item in simple.misc_list %}
+{% set new_var = item %}{{ new_var }}{% endfor %}
+{{ new_var }}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/set.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/set.tpl.out
new file mode 100644
index 0000000..bede53d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/set.tpl.out
@@ -0,0 +1,8 @@
+hello
+world
+world
+Hello
+99
+3.140000
+good
+world
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/spaceless.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/spaceless.tpl
new file mode 100644
index 0000000..5659e81
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/spaceless.tpl
@@ -0,0 +1,18 @@
+{% spaceless %}
+
+
+ This is a test! Mail me at
+
+
+ mail@example.tld
+
+
+
+
+
+ Yep!
+
+
+
+
+{% endspaceless %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/spaceless.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/spaceless.tpl.out
new file mode 100644
index 0000000..5b33e2e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/spaceless.tpl.out
@@ -0,0 +1,11 @@
+
+
+ This is a test! Mail me at
+
+
+ mail@example.tld
+
+
+ Yep!
+
+
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ssi.helper b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ssi.helper
new file mode 100644
index 0000000..aa481d9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ssi.helper
@@ -0,0 +1,2 @@
+{{ number }}
+{{ "hello" }}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ssi.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ssi.tpl
new file mode 100644
index 0000000..0159bf2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ssi.tpl
@@ -0,0 +1,2 @@
+{% ssi "ssi.helper" %}
+{% ssi "ssi.helper" parsed %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ssi.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ssi.tpl.out
new file mode 100644
index 0000000..bc9ede0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/ssi.tpl.out
@@ -0,0 +1,4 @@
+{{ number }}
+{{ "hello" }}
+11
+hello
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/tag_filter.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/tag_filter.tpl
new file mode 100644
index 0000000..2b11823
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/tag_filter.tpl
@@ -0,0 +1,3 @@
+{% filter lower %}This is a nice test; let's see whether it works. Foobar. {{ simple.xss }}{% endfilter %}
+
+{% filter truncatechars:10|lower|length %}This is a nice test; let's see whether it works. Foobar. {{ simple.number }}{% endfilter %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/tag_filter.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/tag_filter.tpl.out
new file mode 100644
index 0000000..1e0bc0e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/tag_filter.tpl.out
@@ -0,0 +1,3 @@
+this is a nice test; let's see whether it works. foobar. <script>alert("uh oh");</script>
+
+10
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/tags-compilation.err b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/tags-compilation.err
new file mode 100644
index 0000000..3725c16
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/tags-compilation.err
@@ -0,0 +1,6 @@
+{% if true %}
+{% if (1) . %}{% endif %}
+{% block test %}{% block test %}{% endblock %}{% endblock %}
+{% block test %}{% block test %}{% endblock %}{% endblock test2 %}
+{% block test %}{% block test2 %}{% endblock xy %}{% endblock test %}
+{% block test %}{% block test2 %}{% endblock test2 test3 %}{% endblock test %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/tags-compilation.err.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/tags-compilation.err.out
new file mode 100644
index 0000000..c6bb035
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/tags-compilation.err.out
@@ -0,0 +1,6 @@
+.*Unexpected EOF, expected tag elif or else or endif.
+.*If-condition is malformed.
+.*Block named 'test' already defined.*
+.*Name for 'endblock' must equal to 'block'\-tag's name \('test' != 'test2'\).
+.*Name for 'endblock' must equal to 'block'-tag's name \('test2' != 'xy'\).
+.*Either no or only one argument \(identifier\) allowed for 'endblock'.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/template_sets.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/template_sets.tpl
new file mode 100644
index 0000000..60e195a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/template_sets.tpl
@@ -0,0 +1,2 @@
+Globals
+{{ this_is_a_global_variable }}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/template_sets.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/template_sets.tpl.out
new file mode 100644
index 0000000..f20279c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/template_sets.tpl.out
@@ -0,0 +1,2 @@
+Globals
+this is a global text
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/templatetag.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/templatetag.tpl
new file mode 100644
index 0000000..ef23991
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/templatetag.tpl
@@ -0,0 +1,4 @@
+{% templatetag openblock %} url 'entry_list' {% templatetag closeblock %}
+{% templatetag openvariable %}{% templatetag closevariable %}
+{% templatetag openbrace %}{% templatetag closebrace %}
+{% templatetag opencomment %}{% templatetag closecomment %}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/templatetag.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/templatetag.tpl.out
new file mode 100644
index 0000000..5c46ef8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/templatetag.tpl.out
@@ -0,0 +1,4 @@
+{% url 'entry_list' %}
+{{}}
+{}
+{##}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/variables.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/variables.tpl
new file mode 100644
index 0000000..2f78cae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/variables.tpl
@@ -0,0 +1,13 @@
+{{ 1 }}
+{{ -5 }}
+{{ "hallo" }}
+{{ true }}
+{{ false }}
+{{ simple.uint }}
+{{ simple.nil }}
+{{ simple.str }}
+{{ simple.bool_false }}
+{{ simple.bool_true }}
+{{ simple.uint }}
+{{ simple.uint|integer }}
+{{ simple.uint|float }}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/variables.tpl.out b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/variables.tpl.out
new file mode 100644
index 0000000..bdc9569
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/variables.tpl.out
@@ -0,0 +1,13 @@
+1
+-5
+hallo
+True
+False
+8
+
+string
+False
+True
+8
+8
+8.000000
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/verbatim.tpl b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/verbatim.tpl
new file mode 100644
index 0000000..1d9cc88
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_tests/verbatim.tpl
@@ -0,0 +1,7 @@
+.{{ simple.number }}{% verbatim %}
+